Nov 22 04:46:35 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 22 04:46:35 crc restorecon[4688]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 04:46:35 crc restorecon[4688]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:35 crc restorecon[4688]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc 
restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 04:46:35 crc restorecon[4688]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 04:46:35 crc restorecon[4688]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 04:46:35 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 04:46:36 crc 
restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc 
restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc 
restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 
crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 
04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 04:46:36 crc restorecon[4688]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 
04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 04:46:36 crc 
restorecon[4688]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 
04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 
04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc 
restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 04:46:36 crc restorecon[4688]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 22 04:46:39 crc kubenswrapper[4948]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 22 04:46:39 crc kubenswrapper[4948]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 22 04:46:39 crc kubenswrapper[4948]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 22 04:46:39 crc kubenswrapper[4948]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
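
The restorecon pass recorded above ends at this point. A reading note: container_file_t is a customizable SELinux type, so restorecon leaves admin-customized contexts (including the per-pod MCS category pairs such as s0:c682,c947) in place and reports them as "not reset", while a genuinely mislabeled file, like the kubenswrapper binary just above, is actively relabeled (bin_t to kubelet_exec_t). Below is a minimal triage sketch, not part of the captured log, that tallies those two outcomes from a saved copy of this journal; the message layout is assumed from the entries above, and the script name is hypothetical.

#!/usr/bin/env python3
# restorecon_triage.py (hypothetical helper, not part of the original log).
# Reads a saved kubelet.log on stdin and tallies restorecon's two outcomes:
#   "<path> not reset as customized by admin to <context>"  (left in place)
#   "Relabeled <path> from <old> to <new>"                  (actually changed)
import re
import sys
from collections import Counter

NOT_RESET = re.compile(r"restorecon\[\d+\]: (\S+) not reset as customized by admin to (\S+)")
RELABELED = re.compile(r"restorecon\[\d+\]: Relabeled (\S+) from (\S+) to (\S+)")

kept = Counter()       # target context -> files left with their customized label
relabeled = Counter()  # new context    -> files restorecon actually relabeled

for line in sys.stdin:
    # finditer copes with journal lines that carry several entries each
    for m in NOT_RESET.finditer(line):
        kept[m.group(2)] += 1
    for m in RELABELED.finditer(line):
        relabeled[m.group(3)] += 1

for ctx, n in kept.most_common():
    print(f"kept     {n:6d}  {ctx}")
for ctx, n in relabeled.most_common():
    print(f"relabel  {n:6d}  {ctx}")

Fed this file on stdin (python3 restorecon_triage.py < kubelet.log), it prints one count per SELinux context; a dry run such as restorecon -Rnv /var/lib/kubelet should preview the same decisions without changing anything, and -F would force customizable types back to the policy default.
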
Nov 22 04:46:39 crc kubenswrapper[4948]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 22 04:46:39 crc kubenswrapper[4948]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.343193 4948 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354130 4948 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354169 4948 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354174 4948 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354178 4948 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354182 4948 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354186 4948 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354191 4948 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354195 4948 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354199 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354205 4948 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
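
The deprecation warnings above all point at the same remedy: move the flag into the file passed via the kubelet's --config option. As a hedged reference, not part of the log, the sketch below maps each deprecated flag seen in this startup to the KubeletConfiguration (kubelet.config.k8s.io/v1beta1) field it is believed to correspond to; --pod-infra-container-image is the exception, since per the warning text the sandbox image now comes from the CRI runtime's own configuration.

#!/usr/bin/env python3
# Hedged reference table, not from the log: where the deprecated kubelet flags
# warned about above normally land in a KubeletConfiguration file.
FLAG_TO_CONFIG_FIELD = {
    "--container-runtime-endpoint": "containerRuntimeEndpoint",
    "--volume-plugin-dir": "volumePluginDir",
    "--register-with-taints": "registerWithTaints",
    "--system-reserved": "systemReserved",
    # a replacement rather than a rename, per the deprecation message:
    "--minimum-container-ttl-duration": "evictionHard / evictionSoft",
    "--pod-infra-container-image": None,  # moves to the CRI runtime config (e.g. CRI-O pause_image)
}

if __name__ == "__main__":
    for flag, field in FLAG_TO_CONFIG_FIELD.items():
        print(f"{flag:35} -> {field or 'no KubeletConfiguration field (configure the runtime)'}")

Field names should be verified against the kubelet config-file reference for the Kubernetes release actually in use.
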
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354212 4948 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354217 4948 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354222 4948 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354228 4948 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354233 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354239 4948 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354243 4948 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354246 4948 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354250 4948 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354254 4948 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354258 4948 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354262 4948 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354273 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354277 4948 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354282 4948 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354287 4948 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354291 4948 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354295 4948 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354298 4948 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354303 4948 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354308 4948 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354312 4948 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354316 4948 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354320 4948 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354324 4948 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354328 4948 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354332 4948 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354336 4948 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354339 4948 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354343 4948 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354347 4948 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354350 4948 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354354 4948 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354358 4948 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354361 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354366 4948 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354370 4948 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354375 4948 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354378 4948 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354382 4948 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354385 4948 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354389 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354393 4948 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354396 4948 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354399 4948 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354403 4948 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354407 4948 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354412 4948 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354416 4948 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354420 4948 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354424 4948 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354428 4948 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354432 4948 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354436 4948 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354439 4948 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354443 4948 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354446 4948 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354451 4948 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354455 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354458 4948 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.354479 4948 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354573 4948 flags.go:64] FLAG: --address="0.0.0.0"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354585 4948 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354593 4948 flags.go:64] FLAG: --anonymous-auth="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354600 4948 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354606 4948 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354611 4948 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354617 4948 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354623 4948 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354628 4948 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354632 4948 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354639 4948 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354644 4948 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354648 4948 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354653 4948 flags.go:64] FLAG: --cgroup-root=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354657 4948 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354661 4948 flags.go:64] FLAG: --client-ca-file=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354666 4948 flags.go:64] FLAG: --cloud-config=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354670 4948 flags.go:64] FLAG: --cloud-provider=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354675 4948 flags.go:64] FLAG: --cluster-dns="[]"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354682 4948 flags.go:64] FLAG: --cluster-domain=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354686 4948 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354690 4948 flags.go:64] FLAG: --config-dir=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354694 4948 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354700 4948 flags.go:64] FLAG: --container-log-max-files="5"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354706 4948 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354710 4948 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354715 4948 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354719 4948 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354724 4948 flags.go:64] FLAG: --contention-profiling="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354728 4948 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354733 4948 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354737 4948 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354741 4948 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354747 4948 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354751 4948 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354755 4948 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354759 4948 flags.go:64] FLAG: --enable-load-reader="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354765 4948 flags.go:64] FLAG: --enable-server="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354769 4948 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354775 4948 flags.go:64] FLAG: --event-burst="100"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354781 4948 flags.go:64] FLAG: --event-qps="50"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354785 4948 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354789 4948 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354794 4948 flags.go:64] FLAG: --eviction-hard=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354799 4948 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354803 4948 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354808 4948 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354812 4948 flags.go:64] FLAG: --eviction-soft=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354816 4948 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354820 4948 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354825 4948 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354829 4948 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354833 4948 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354838 4948 flags.go:64] FLAG: --fail-swap-on="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354842 4948 flags.go:64] FLAG: --feature-gates=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354848 4948 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354852 4948 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354857 4948 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354861 4948 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354866 4948 flags.go:64] FLAG: --healthz-port="10248"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354870 4948 flags.go:64] FLAG: --help="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354874 4948 flags.go:64] FLAG: --hostname-override=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354878 4948 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354883 4948 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354887 4948 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354891 4948 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354895 4948 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354899 4948 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354903 4948 flags.go:64] FLAG: --image-service-endpoint=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354908 4948 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354911 4948 flags.go:64] FLAG: --kube-api-burst="100"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354915 4948 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354922 4948 flags.go:64] FLAG: --kube-api-qps="50"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354927 4948 flags.go:64] FLAG: --kube-reserved=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354931 4948 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354935 4948 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354940 4948 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354944 4948 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354948 4948 flags.go:64] FLAG: --lock-file=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354952 4948 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354957 4948 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354961 4948 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354968 4948 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354972 4948 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354976 4948 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354980 4948 flags.go:64] FLAG: --logging-format="text"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354984 4948 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354991 4948 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354995 4948 flags.go:64] FLAG: --manifest-url=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.354999 4948 flags.go:64] FLAG: --manifest-url-header=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355006 4948 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355010 4948 flags.go:64] FLAG: --max-open-files="1000000"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355016 4948 flags.go:64] FLAG: --max-pods="110"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355021 4948 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355025 4948 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355029 4948 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355034 4948 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355038 4948 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355043 4948 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355047 4948 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355059 4948 flags.go:64] FLAG: --node-status-max-images="50"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355064 4948 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355068 4948 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355072 4948 flags.go:64] FLAG: --pod-cidr=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355076 4948 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355086 4948 flags.go:64] FLAG: --pod-manifest-path=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355090 4948 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355094 4948 flags.go:64] FLAG: --pods-per-core="0"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355098 4948 flags.go:64] FLAG: --port="10250"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355109 4948 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355114 4948 flags.go:64] FLAG: --provider-id=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355118 4948 flags.go:64] FLAG: --qos-reserved=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355123 4948 flags.go:64] FLAG: --read-only-port="10255"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355127 4948 flags.go:64] FLAG: --register-node="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355132 4948 flags.go:64] FLAG: --register-schedulable="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355137 4948 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355146 4948 flags.go:64] FLAG: --registry-burst="10"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355151 4948 flags.go:64] FLAG: --registry-qps="5"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355155 4948 flags.go:64] FLAG: --reserved-cpus=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355159 4948 flags.go:64] FLAG: --reserved-memory=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355166 4948 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355171 4948 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355176 4948 flags.go:64] FLAG: --rotate-certificates="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355180 4948 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355185 4948 flags.go:64] FLAG: --runonce="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355190 4948 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355195 4948 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355199 4948 flags.go:64] FLAG: --seccomp-default="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355203 4948 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355207 4948 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355212 4948 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355217 4948 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355221 4948 flags.go:64] FLAG: --storage-driver-password="root"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355225 4948 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355230 4948 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355234 4948 flags.go:64] FLAG: --storage-driver-user="root"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355238 4948 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355243 4948 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355247 4948 flags.go:64] FLAG: --system-cgroups=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355252 4948 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355258 4948 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355262 4948 flags.go:64] FLAG: --tls-cert-file=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355266 4948 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355272 4948 flags.go:64] FLAG: --tls-min-version=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355302 4948 flags.go:64] FLAG: --tls-private-key-file=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355307 4948 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355311 4948 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355316 4948 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355320 4948 flags.go:64] FLAG: --v="2"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355327 4948 flags.go:64] FLAG: --version="false"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355332 4948 flags.go:64] FLAG: --vmodule=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355338 4948 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355343 4948 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355449 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355454 4948 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355471 4948 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355479 4948 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
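[editor's note] The flags.go:64 block above is the kubelet logging its entire effective flag set, which it does once at startup because verbosity is --v=2 (itself visible in the dump). To recover just that table from a node like this one, something along these lines should work, assuming the kubelet runs as the usual kubelet systemd unit:

    journalctl -u kubelet -b | grep -F 'flags.go:64' | sed 's/.*FLAG: //'

which yields one --flag="value" pair per line, e.g. --node-ip="192.168.126.11".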
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355485 4948 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355490 4948 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355494 4948 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355498 4948 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355503 4948 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355507 4948 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355511 4948 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355515 4948 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355519 4948 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355522 4948 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355526 4948 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355530 4948 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355535 4948 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355539 4948 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355543 4948 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355547 4948 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355552 4948 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355557 4948 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355561 4948 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355565 4948 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355570 4948 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355573 4948 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355577 4948 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355581 4948 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355585 4948 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355589 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355593 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355597 4948 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355601 4948 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355605 4948 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355609 4948 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355613 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355617 4948 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355620 4948 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355624 4948 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355628 4948 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355633 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355639 4948 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355643 4948 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355647 4948 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355651 4948 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355654 4948 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355658 4948 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355662 4948 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355666 4948 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355669 4948 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355673 4948 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355677 4948 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355680 4948 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355684 4948 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355689 4948 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355693 4948 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355698 4948 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355704 4948 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355708 4948 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355712 4948 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355716 4948 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355719 4948 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355723 4948 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355728 4948 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355733 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355737 4948 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355747 4948 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355751 4948 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355757 4948 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355762 4948 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.355767 4948 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.355780 4948 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.370217 4948 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.370260 4948 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370376 4948 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370391 4948 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370400 4948 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370408 4948 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370417 4948 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370426 4948 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370435 4948 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370443 4948 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370451 4948 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370461 4948 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370503 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370514 4948 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370522 4948 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370530 4948 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370538 4948 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370547 4948 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370556 4948 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370565 4948 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370572 4948 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370581 4948 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370589 4948 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370600 4948 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370612 4948 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370621 4948 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370630 4948 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370639 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370649 4948 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370657 4948 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370666 4948 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370674 4948 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370682 4948 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370690 4948 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370699 4948 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370709 4948 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370718 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370726 4948 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370734 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370742 4948 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370753 4948 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370763 4948 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370772 4948 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370781 4948 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370790 4948 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370799 4948 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370807 4948 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370816 4948 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370824 4948 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370831 4948 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370839 4948 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370847 4948 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370856 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370864 4948 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370871 4948 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370879 4948 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370887 4948 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370896 4948 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370906 4948 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370915 4948 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370926 4948 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370933 4948 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370941 4948 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370949 4948 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370957 4948 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370965 4948 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370972 4948 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370980 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370988 4948 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.370996 4948 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371003 4948 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371014 4948 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371023 4948 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.371036 4948 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371290 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371305 4948 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371314 4948 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371322 4948 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371332 4948 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371340 4948 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371349 4948 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371357 4948 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371365 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371373 4948 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371381 4948 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371388 4948 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371397 4948 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371405 4948 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371412 4948 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371420 4948 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371428 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371436 4948 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371444 4948 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371453 4948 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371489 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371501 4948 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371512 4948 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371520 4948 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371528 4948 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371536 4948 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371543 4948 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371551 4948 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371559 4948 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371566 4948 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371574 4948 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371582 4948 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371590 4948 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371598 4948 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371606 4948 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371614 4948 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371621 4948 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371630 4948 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371638 4948 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371646 4948 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371653 4948 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371661 4948 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371669 4948 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371677 4948 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371688 4948 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371699 4948 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371707 4948 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371715 4948 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371725 4948 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371734 4948 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371743 4948 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371751 4948 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371759 4948 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371767 4948 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371775 4948 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371784 4948 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371795 4948 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371805 4948 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371814 4948 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371825 4948 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371834 4948 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371844 4948 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371853 4948 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371861 4948 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371870 4948 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371879 4948 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371887 4948 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371895 4948 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371903 4948 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371912 4948 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.371922 4948 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.371934 4948 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.374856 4948 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.410075 4948 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.410180 4948 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
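[editor's note] With client rotation on and the existing kubeconfig still valid, the kubelet skips bootstrapping and loads the current client certificate pair directly. A quick cross-check of that pair from the node, assuming shell access:

    openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -subject -enddate

The notAfter printed there should match the "Certificate expiration" line that follows; the rotation deadline (2026-01-13, well before the 2026-02-24 expiry) is expected, since the certificate manager schedules rotation at a jittered point at roughly 70-90% of the certificate's lifetime rather than at expiry.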
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.411738 4948 server.go:997] "Starting client certificate rotation"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.411766 4948 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.411967 4948 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-13 21:58:58.330145705 +0000 UTC
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.412067 4948 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1265h12m18.918081443s for next certificate rotation
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.441377 4948 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.446990 4948 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.497228 4948 log.go:25] "Validated CRI v1 runtime API"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.588702 4948 log.go:25] "Validated CRI v1 image API"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.595143 4948 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.607286 4948 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-22-04-42-27-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.607339 4948 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.632184 4948 manager.go:217] Machine: {Timestamp:2025-11-22 04:46:39.6300135 +0000 UTC m=+2.316024056 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654120448 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:52e0db1d-3891-41f9-818e-4b9385ad1108 BootID:662538c3-26b0-4a27-a0b1-8418c7cea741 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827060224 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:dd:f8:43 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:dd:f8:43 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:4b:8d:b4 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:b1:06:2e Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:6e:22:b2 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:52:04:76 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:1a:44:15:55:f3:46 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:82:24:7b:bf:ed:0f Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654120448 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.632487 4948 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.632717 4948 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.634392 4948 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.634637 4948 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.634691 4948 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.641614 4948 topology_manager.go:138] "Creating topology manager with none policy"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.641640 4948 container_manager_linux.go:303] "Creating device plugin manager"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.642127 4948 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.642149 4948 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.642412 4948 state_mem.go:36] "Initialized new in-memory state store"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.642556 4948 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.648942 4948 kubelet.go:418] "Attempting to sync node with API server"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.648968 4948 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.648989 4948 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.649005 4948 kubelet.go:324] "Adding apiserver pod source"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.649023 4948 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.657189 4948 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.659576 4948 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.660150 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused
Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.660376 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError"
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.660411 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused
Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.660650 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.663059 4948 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664656 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664686 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664695 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664704 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664719 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664727 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664733 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664749 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664758 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664767 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664806 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.664813 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.667802 4948 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.671718 4948 server.go:1280] "Started kubelet"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.671840 4948 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.672209 4948 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.672694 4948 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.672863 4948 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 22 04:46:39 crc systemd[1]: Started Kubernetes Kubelet.
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.680567 4948 server.go:460] "Adding debug handlers to kubelet server"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.680601 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.680668 4948 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.680745 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 13:34:37.794801976 +0000 UTC
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.680849 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 848h47m58.113958695s for next certificate rotation
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.680945 4948 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.680970 4948 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.681036 4948 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.681113 4948 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.682415 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused
Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.682733 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.683833 4948 factory.go:55] Registering systemd factory
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.683865 4948 factory.go:221] Registration of the systemd container factory successfully
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.684512 4948 factory.go:153] Registering CRI-O factory
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.684559 4948 factory.go:221] Registration of the crio container factory successfully
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.684678 4948 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.684709 4948 factory.go:103] Registering Raw factory
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.684730 4948 manager.go:1196] Started watching for new ooms in manager
Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.685019 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="200ms"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.685436 4948 manager.go:319] Starting recovery of all containers
Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.684412 4948 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.223:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a3abfd5f39730 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-22 04:46:39.671646 +0000 UTC m=+2.357656526,LastTimestamp:2025-11-22 04:46:39.671646 +0000 UTC m=+2.357656526,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692758 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692823 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692838 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692852 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692863 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692874 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692888 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692902 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692917 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692930 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692944 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.692988 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693002 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693016 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693028 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693077 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693090 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693101 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693113 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693124 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693136 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693147 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693160 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693171 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693184 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693196 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693213 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693225 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693237 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693249 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693260 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693271 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693314 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693326 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693336 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693369 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693380 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693391 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693404 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693416 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693427 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693440 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693451 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693477 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693489 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693503 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693514 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693536 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693557 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693572 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693587 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693602 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693623 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693641 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693657 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693672 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693688 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693704 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693720 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693737 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693752 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693765 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693778 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693793 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693885 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693898 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693910 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693924 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693934 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693945 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693955 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693968 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.693978 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695708 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695752 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695784 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695800 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695819 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695845 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695860 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695874 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695897 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695914 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695938 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695955 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695974 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.695995 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696009 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696029 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696046 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696061 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696080 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696096 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696114 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696131 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696147 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696166 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696180 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696200 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696216 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696231 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696248 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696262 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696280 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696340 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696364 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696384 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696401 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696420 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696438 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696477 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696499 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.696517 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698042 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698125 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698180 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698221 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698268 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698309 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698356 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698392 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698433 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698521 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698566 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698601 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698649 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.698683 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.699023 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.699133 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.699221 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.705810 4948 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706280 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706309 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706324 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706337 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706350 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706364 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706383 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706396 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706411 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706431 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state"
pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706448 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706498 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706517 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706531 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706547 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706563 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706579 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706597 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706612 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706624 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706637 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706653 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706664 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706679 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706694 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706709 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706723 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706736 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706751 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706766 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706781 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706796 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706840 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706855 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706869 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706884 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706900 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706915 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706928 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706940 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706952 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706967 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706981 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" 
volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.706994 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707008 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707022 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707038 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707052 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707068 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707086 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707104 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707116 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707131 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707148 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" 
volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707164 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707181 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707197 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707216 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707232 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707257 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707274 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707292 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707308 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707324 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707340 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707356 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707373 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707390 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707406 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707423 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707441 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707480 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707501 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707519 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707535 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707550 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707568 4948 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707582 4948 reconstruct.go:97] "Volume reconstruction finished" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.707593 4948 reconciler.go:26] "Reconciler: start to sync state" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.716865 4948 manager.go:324] Recovery completed Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.733889 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.736039 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.736087 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.736101 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.737396 4948 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.737431 4948 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.737456 4948 state_mem.go:36] "Initialized new in-memory state store" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.743737 4948 policy_none.go:49] "None policy: Start" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.747121 4948 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.747172 4948 state_mem.go:35] "Initializing new in-memory state store" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.754120 4948 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.756702 4948 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.756760 4948 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.756801 4948 kubelet.go:2335] "Starting kubelet main sync loop" Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.756859 4948 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 22 04:46:39 crc kubenswrapper[4948]: W1122 04:46:39.760156 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.760309 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.781833 4948 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.808703 4948 manager.go:334] "Starting Device Plugin manager" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.808783 4948 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.808801 4948 server.go:79] "Starting device plugin registration server" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.809538 4948 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.809561 4948 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.810344 4948 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.810590 4948 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.810699 4948 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.821039 4948 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.857341 4948 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.857519 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.861365 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.861414 4948 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.861426 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.861632 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.861938 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.861992 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.862511 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.862548 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.862560 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.862707 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.862726 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.862746 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.862760 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.862852 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.862881 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.863763 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.863831 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.863852 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.863974 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.863994 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.864006 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.864161 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.864264 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.864292 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.865651 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.865670 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.865678 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.865795 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.866160 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.866194 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.866310 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.866360 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.866399 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.866954 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.866986 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.867003 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.866981 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.867086 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.867115 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.867304 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.867366 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.869116 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.869175 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.869190 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.886787 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="400ms" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.909993 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911143 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911184 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911199 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911231 4948 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911676 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911709 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911730 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911752 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911772 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911788 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911805 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: E1122 04:46:39.911791 4948 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911826 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911867 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911925 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911943 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911963 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.911986 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod 
\"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.912005 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:39 crc kubenswrapper[4948]: I1122 04:46:39.912054 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013057 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013151 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013180 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013203 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013246 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013300 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013330 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013370 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013428 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013499 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013393 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013511 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013484 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013394 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013597 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013628 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013636 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013649 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013687 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013700 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013721 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013746 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013769 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013798 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013818 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013833 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013836 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " 
pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013868 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013884 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.013857 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.112804 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.114533 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.114716 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.114883 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.115039 4948 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 04:46:40 crc kubenswrapper[4948]: E1122 04:46:40.115724 4948 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.189022 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.196834 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.214417 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.235125 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: W1122 04:46:40.236360 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-26e1259d07c15f3fa5b3d9bdad296af6a35a39fb6940cb1aac1efb48abac187b WatchSource:0}: Error finding container 26e1259d07c15f3fa5b3d9bdad296af6a35a39fb6940cb1aac1efb48abac187b: Status 404 returned error can't find the container with id 26e1259d07c15f3fa5b3d9bdad296af6a35a39fb6940cb1aac1efb48abac187b Nov 22 04:46:40 crc kubenswrapper[4948]: W1122 04:46:40.236765 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-3dd48abb69e1e20ae1ea95a1731fa1d8c4888736368f0c927da00dcf135c23ce WatchSource:0}: Error finding container 3dd48abb69e1e20ae1ea95a1731fa1d8c4888736368f0c927da00dcf135c23ce: Status 404 returned error can't find the container with id 3dd48abb69e1e20ae1ea95a1731fa1d8c4888736368f0c927da00dcf135c23ce Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.241264 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:40 crc kubenswrapper[4948]: W1122 04:46:40.247579 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-7dc51c02dd2e97ee49677e075ee5eaa3f6d5ae8f32f635dbe7f6d937b6cc18af WatchSource:0}: Error finding container 7dc51c02dd2e97ee49677e075ee5eaa3f6d5ae8f32f635dbe7f6d937b6cc18af: Status 404 returned error can't find the container with id 7dc51c02dd2e97ee49677e075ee5eaa3f6d5ae8f32f635dbe7f6d937b6cc18af Nov 22 04:46:40 crc kubenswrapper[4948]: W1122 04:46:40.260238 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-28790ce2d14806432783c80f3bfc6dce84fd438fc4f893faaecf9663a1a62bac WatchSource:0}: Error finding container 28790ce2d14806432783c80f3bfc6dce84fd438fc4f893faaecf9663a1a62bac: Status 404 returned error can't find the container with id 28790ce2d14806432783c80f3bfc6dce84fd438fc4f893faaecf9663a1a62bac Nov 22 04:46:40 crc kubenswrapper[4948]: W1122 04:46:40.261491 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-e9915c0fecbdbafd48a75bb4b6a084221d5248792fb7f998561d0d41c5e3f6f5 WatchSource:0}: Error finding container e9915c0fecbdbafd48a75bb4b6a084221d5248792fb7f998561d0d41c5e3f6f5: Status 404 returned error can't find the container with id e9915c0fecbdbafd48a75bb4b6a084221d5248792fb7f998561d0d41c5e3f6f5 Nov 22 04:46:40 crc kubenswrapper[4948]: E1122 04:46:40.288353 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="800ms" Nov 22 04:46:40 crc kubenswrapper[4948]: W1122 04:46:40.500611 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial 
tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:40 crc kubenswrapper[4948]: E1122 04:46:40.500727 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.516230 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.517787 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.517847 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.517866 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.517900 4948 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 04:46:40 crc kubenswrapper[4948]: E1122 04:46:40.518525 4948 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.674161 4948 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.763241 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7dc51c02dd2e97ee49677e075ee5eaa3f6d5ae8f32f635dbe7f6d937b6cc18af"} Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.764524 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"3dd48abb69e1e20ae1ea95a1731fa1d8c4888736368f0c927da00dcf135c23ce"} Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.765759 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"26e1259d07c15f3fa5b3d9bdad296af6a35a39fb6940cb1aac1efb48abac187b"} Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.767660 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"28790ce2d14806432783c80f3bfc6dce84fd438fc4f893faaecf9663a1a62bac"} Nov 22 04:46:40 crc kubenswrapper[4948]: I1122 04:46:40.769158 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e9915c0fecbdbafd48a75bb4b6a084221d5248792fb7f998561d0d41c5e3f6f5"} Nov 22 04:46:40 crc kubenswrapper[4948]: W1122 04:46:40.879619 4948 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:40 crc kubenswrapper[4948]: E1122 04:46:40.880046 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Nov 22 04:46:41 crc kubenswrapper[4948]: W1122 04:46:41.035253 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:41 crc kubenswrapper[4948]: E1122 04:46:41.035364 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Nov 22 04:46:41 crc kubenswrapper[4948]: E1122 04:46:41.089445 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="1.6s" Nov 22 04:46:41 crc kubenswrapper[4948]: W1122 04:46:41.143157 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:41 crc kubenswrapper[4948]: E1122 04:46:41.143238 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.319481 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.321782 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.321820 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.321829 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.321853 4948 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 04:46:41 crc kubenswrapper[4948]: E1122 04:46:41.322490 4948 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Nov 22 04:46:41 
crc kubenswrapper[4948]: I1122 04:46:41.673655 4948 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.774765 4948 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87" exitCode=0 Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.774891 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.774923 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87"} Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.775839 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.775877 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.775888 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.778046 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867"} Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.778089 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42"} Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.778100 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779"} Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.779752 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251" exitCode=0 Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.779824 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251"} Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.779946 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.781155 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.781198 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.781215 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.781686 4948 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="71801968e9fb356bbf9c14133a38c0a16c9d51857b3a8c8c81db6e65b803fde3" exitCode=0 Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.781740 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"71801968e9fb356bbf9c14133a38c0a16c9d51857b3a8c8c81db6e65b803fde3"} Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.781795 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.783176 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.783200 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.783210 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.783840 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.784376 4948 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="d4cf0871be059cf435b4c1060a6c032c87a8c388bbd9ee2f114fc1528764c5dd" exitCode=0 Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.784429 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"d4cf0871be059cf435b4c1060a6c032c87a8c388bbd9ee2f114fc1528764c5dd"} Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.784526 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.789101 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.789157 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.789178 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.789293 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.789582 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:41 crc kubenswrapper[4948]: I1122 04:46:41.789603 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:42 crc kubenswrapper[4948]: W1122 04:46:42.383972 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to 
list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:42 crc kubenswrapper[4948]: E1122 04:46:42.384077 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.673827 4948 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:42 crc kubenswrapper[4948]: E1122 04:46:42.691563 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.223:6443: connect: connection refused" interval="3.2s" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.790690 4948 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a1fe78939b1ef42318fec4d8b7aa917284e34f7616e863faa83305b43b457fc8" exitCode=0 Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.790809 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a1fe78939b1ef42318fec4d8b7aa917284e34f7616e863faa83305b43b457fc8"} Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.790852 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.791832 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.791868 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.791879 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.793057 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.793179 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"65c8ccb7e7f0f0b46b13e82efb731c8cd06abbcf1924fe826ecb4a619efaa821"} Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.794224 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.794256 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.794265 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.796637 4948 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9"} Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.796664 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417"} Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.796677 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f"} Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.796744 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.800735 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.800768 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.800780 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.803849 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d"} Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.803905 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.805146 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.805179 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.805191 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.807189 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67"} Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.807216 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5"} Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.807227 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4"} Nov 22 04:46:42 crc 
kubenswrapper[4948]: I1122 04:46:42.807237 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb"} Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.807328 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.808134 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.808154 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.808162 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.923455 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.925596 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.925648 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.925662 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:42 crc kubenswrapper[4948]: I1122 04:46:42.925702 4948 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 04:46:42 crc kubenswrapper[4948]: E1122 04:46:42.926507 4948 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.223:6443: connect: connection refused" node="crc" Nov 22 04:46:42 crc kubenswrapper[4948]: W1122 04:46:42.953830 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:42 crc kubenswrapper[4948]: E1122 04:46:42.953930 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Nov 22 04:46:43 crc kubenswrapper[4948]: W1122 04:46:43.483379 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:43 crc kubenswrapper[4948]: E1122 04:46:43.483547 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Nov 22 
04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.674246 4948 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:43 crc kubenswrapper[4948]: W1122 04:46:43.808865 4948 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.223:6443: connect: connection refused Nov 22 04:46:43 crc kubenswrapper[4948]: E1122 04:46:43.808999 4948 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.223:6443: connect: connection refused" logger="UnhandledError" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.811905 4948 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="51b82e08f611cea37b2b5542faad3c64f8d264b5cf225158d872af04d0945969" exitCode=0 Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.811985 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"51b82e08f611cea37b2b5542faad3c64f8d264b5cf225158d872af04d0945969"} Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.812063 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.813300 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.813990 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.814051 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.814086 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.815296 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741" exitCode=255 Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.815379 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.815398 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.815412 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741"} Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.815499 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:43 crc 
kubenswrapper[4948]: I1122 04:46:43.816016 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.815404 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.816611 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.816696 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.816717 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817270 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817326 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817347 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817326 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817371 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817403 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817415 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817327 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817496 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:43 crc kubenswrapper[4948]: I1122 04:46:43.817694 4948 scope.go:117] "RemoveContainer" containerID="fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.661684 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.674141 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.737194 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.825021 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fff8e30241b4b8ea84db537c47dfa7905c43b01714027f52a7293ce9cfc2750a"} Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.825079 4948 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d4b09fbab9c83fad5caa3d426e0fc2ecb8c565caa39be4e912076b865889a5d3"} Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.825091 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"75b182ef7f6efcbe9de9a5540f1f3ce3f826d68c102031c1f1027b0bbdd68e82"} Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.825102 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d62fb431b4d87a66f9412a86cdaed64462dfac6e2d89d0f6e3df829ce5bb7490"} Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.828581 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.835452 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.835517 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.835503 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea"} Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.835484 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.836578 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.836601 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.836609 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.836578 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.836673 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:44 crc kubenswrapper[4948]: I1122 04:46:44.836685 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.049137 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.844217 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b13d3494f0d35a8772fe1cf1e8379f2bd03193e53fa0ca221d66e2b3e2f79149"} Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.844305 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.844323 4948 
prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.844371 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.844388 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.844373 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.846038 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.846090 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.846109 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.846898 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.846961 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.846973 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.847181 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.847219 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:45 crc kubenswrapper[4948]: I1122 04:46:45.847239 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.127390 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.129103 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.129155 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.129174 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.129207 4948 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.847581 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.847648 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.847598 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.849104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.849137 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.849150 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.849177 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.849219 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.849238 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.872121 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.872421 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.873930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.873982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:46 crc kubenswrapper[4948]: I1122 04:46:46.874001 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.335780 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.336037 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.337586 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.337653 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.337676 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.907454 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.907687 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.909322 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.909385 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:48 crc kubenswrapper[4948]: I1122 04:46:48.909396 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:49 crc kubenswrapper[4948]: I1122 04:46:49.019994 
4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:46:49 crc kubenswrapper[4948]: I1122 04:46:49.020309 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:49 crc kubenswrapper[4948]: I1122 04:46:49.022190 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:49 crc kubenswrapper[4948]: I1122 04:46:49.022262 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:49 crc kubenswrapper[4948]: I1122 04:46:49.022282 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:49 crc kubenswrapper[4948]: E1122 04:46:49.821204 4948 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.615949 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.616172 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.617841 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.617899 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.617918 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.622658 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.860093 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.862094 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.862143 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:50 crc kubenswrapper[4948]: I1122 04:46:50.862159 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:51 crc kubenswrapper[4948]: I1122 04:46:51.673399 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:46:51 crc kubenswrapper[4948]: I1122 04:46:51.863308 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:46:51 crc kubenswrapper[4948]: I1122 04:46:51.864814 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:46:51 crc kubenswrapper[4948]: I1122 04:46:51.864849 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:46:51 crc kubenswrapper[4948]: I1122 04:46:51.864858 4948 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:46:54 crc kubenswrapper[4948]: E1122 04:46:54.173701 4948 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": net/http: TLS handshake timeout" event="&Event{ObjectMeta:{crc.187a3abfd5f39730 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-22 04:46:39.671646 +0000 UTC m=+2.357656526,LastTimestamp:2025-11-22 04:46:39.671646 +0000 UTC m=+2.357656526,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 22 04:46:54 crc kubenswrapper[4948]: I1122 04:46:54.487375 4948 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 22 04:46:54 crc kubenswrapper[4948]: I1122 04:46:54.487446 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 22 04:46:54 crc kubenswrapper[4948]: I1122 04:46:54.522325 4948 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 22 04:46:54 crc kubenswrapper[4948]: I1122 04:46:54.522406 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 22 04:46:54 crc kubenswrapper[4948]: I1122 04:46:54.673752 4948 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 22 04:46:54 crc kubenswrapper[4948]: I1122 04:46:54.673844 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.058428 4948 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup 
probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]log ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]etcd ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/openshift.io-api-request-count-filter ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/openshift.io-startkubeinformers ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/priority-and-fairness-config-consumer ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/priority-and-fairness-filter ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-apiextensions-informers ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-apiextensions-controllers ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/crd-informer-synced ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-system-namespaces-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-cluster-authentication-info-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-legacy-token-tracking-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-service-ip-repair-controllers ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Nov 22 04:46:55 crc kubenswrapper[4948]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/priority-and-fairness-config-producer ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/bootstrap-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/start-kube-aggregator-informers ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/apiservice-status-local-available-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/apiservice-status-remote-available-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/apiservice-registration-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/apiservice-wait-for-first-sync ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/apiservice-discovery-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/kube-apiserver-autoregistration ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]autoregister-completion ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/apiservice-openapi-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: [+]poststarthook/apiservice-openapiv3-controller ok
Nov 22 04:46:55 crc kubenswrapper[4948]: livez check failed
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.058555 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.093154 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.094134 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.096559 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.096636 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.096662 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.182792 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.873379 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.874571 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.874604 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.874615 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:46:55 crc kubenswrapper[4948]: I1122 04:46:55.914771 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 22 04:46:56 crc kubenswrapper[4948]: I1122 04:46:56.875746 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 04:46:56 crc kubenswrapper[4948]: I1122 04:46:56.876996 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:46:56 crc kubenswrapper[4948]: I1122 04:46:56.877020 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:46:56 crc kubenswrapper[4948]: I1122 04:46:56.877027 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.519147 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.522327 4948 trace.go:236] Trace[285984104]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 04:46:49.501) (total time: 10020ms):
Nov 22 04:46:59 crc kubenswrapper[4948]: Trace[285984104]: ---"Objects listed" error: 10020ms (04:46:59.522)
Nov 22 04:46:59 crc kubenswrapper[4948]: Trace[285984104]: [10.020879922s] [10.020879922s] END
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.522355 4948 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.522534 4948 trace.go:236] Trace[296450672]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 04:46:46.961) (total time: 12561ms):
Nov 22 04:46:59 crc kubenswrapper[4948]: Trace[296450672]: ---"Objects listed" error: 12561ms (04:46:59.522)
Nov 22 04:46:59 crc kubenswrapper[4948]: Trace[296450672]: [12.561442142s] [12.561442142s] END
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.522544 4948 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.523343 4948 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.523486 4948 trace.go:236] Trace[160919357]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 04:46:47.110) (total time: 12412ms):
Nov 22 04:46:59 crc kubenswrapper[4948]: Trace[160919357]: ---"Objects listed" error: 12412ms (04:46:59.523)
Nov 22 04:46:59 crc kubenswrapper[4948]: Trace[160919357]: [12.412442493s] [12.412442493s] END
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.523535 4948 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.524088 4948 trace.go:236] Trace[911893392]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 04:46:47.043) (total time: 12480ms):
Nov 22 04:46:59 crc kubenswrapper[4948]: Trace[911893392]: ---"Objects listed" error: 12480ms (04:46:59.523)
Nov 22 04:46:59 crc kubenswrapper[4948]: Trace[911893392]: [12.480704413s] [12.480704413s] END
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.524124 4948 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.524758 4948 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.664318 4948 apiserver.go:52] "Watching apiserver"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.669667 4948 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.670343 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"]
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.670926 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.671057 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.671179 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.671242 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.671264 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.671315 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.671363 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.671739 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.671844 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.673446 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.673962 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.674028 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.674073 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.679111 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.679282 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.679405 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.679497 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.679865 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.686585 4948 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.722675 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725357 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725489 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725550 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725589 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725647 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725701 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725736 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725783 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725814 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725861 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725892 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725941 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.725970 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726015 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726045 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726093 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726116 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726148 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726127 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726116 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726216 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726217 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726250 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726298 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726347 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726378 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726402 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726427 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726444 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726485 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726482 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726521 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726505 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726600 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726624 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726650 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726669 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726686 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726702 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726720 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726738 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726754 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726770 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726786 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726804 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726820 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726838 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726854 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726877 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726893 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726910 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726955 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726973 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726991 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727008 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727026 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727043 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727061 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727079 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727096 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727113 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727134 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727152 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727170 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727188 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727208 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727226 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727242 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727274 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727290 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726509 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726546 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726557 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726713 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726868 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726958 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727007 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727060 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727136 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727270 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727288 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727338 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727526 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727622 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727713 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727745 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727932 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.728002 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.728016 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.728051 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.728776 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.728199 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.728569 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.726499 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.728815 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.728925 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.729049 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.729104 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.729115 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.729228 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.730248 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731015 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731037 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731244 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731398 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.727308 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731532 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731563 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731590 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731619 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731645 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731670 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731696 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731719 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731739 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731766 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731792 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731820 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731841 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731865 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731887 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731907 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731933 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731958 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.731978 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732003 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732024 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732049 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732069 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732094 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732116 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732135 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732158 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732180 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732201 4948 reconciler_common.go:159]
"operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732221 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732242 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732262 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732285 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732306 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732324 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732393 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732413 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732434 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732458 
4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732505 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732525 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732517 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732551 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732753 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.732798 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.733193 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734457 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734538 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734587 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734618 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734821 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734852 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734912 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734940 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734982 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735007 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735048 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735098 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735139 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735163 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735208 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735231 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735255 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735296 4948 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735324 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735364 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735388 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735412 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735454 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735509 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735529 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735549 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735590 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 
04:46:59.735617 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735661 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735685 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735711 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735765 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735791 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735839 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735868 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735916 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735943 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 
04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735991 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736114 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736166 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736198 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736244 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736274 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736324 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736350 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736395 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736422 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 04:46:59 crc 
kubenswrapper[4948]: I1122 04:46:59.736489 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736518 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736558 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736583 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736623 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.736650 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737265 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737301 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737347 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737369 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 
04:46:59.737392 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737606 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737631 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737988 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738019 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738040 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738083 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738103 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738123 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738161 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 04:46:59 crc 
kubenswrapper[4948]: I1122 04:46:59.738182 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738201 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738238 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738284 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738321 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738345 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738362 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738397 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738417 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738434 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 
04:46:59.738486 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738506 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738524 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739641 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739692 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739714 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739738 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739778 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739801 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739820 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: 
\"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739929 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739961 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739989 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.740028 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.740053 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.740088 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.740108 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.740131 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.740353 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" 
(UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.740802 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.742977 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743023 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743079 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743103 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743147 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743263 4948 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743276 4948 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743306 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: 
\"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743318 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743329 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743342 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743354 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743380 4948 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743391 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743402 4948 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743412 4948 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743422 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743434 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743459 4948 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743484 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743496 4948 reconciler_common.go:293] "Volume 
detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743506 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743535 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743545 4948 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743556 4948 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743568 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743578 4948 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743588 4948 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743622 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743634 4948 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743645 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743655 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743664 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743675 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: 
\"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743703 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743714 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743723 4948 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743733 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743744 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743755 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743779 4948 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743791 4948 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743802 4948 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743811 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743822 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743832 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743858 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: 
\"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743869 4948 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743880 4948 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.733757 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734641 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.734801 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.735089 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737145 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.744210 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737157 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737632 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.737680 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738122 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738166 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.738605 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.739615 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.740133 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.741040 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.741305 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.741445 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.742113 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.742279 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.742365 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743494 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743545 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743678 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743791 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743884 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.743892 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.744054 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.744099 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.744152 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.744518 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.744531 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.744740 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.745045 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.745456 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.745537 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.745563 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.745746 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:47:00.24571772 +0000 UTC m=+22.931728236 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.745987 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.746447 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.746885 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.747083 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.747489 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.747582 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.747712 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.746128 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.748100 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.748170 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.748208 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.748303 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.748227 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.748703 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.748900 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). 
InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.748990 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.749059 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.749104 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.749193 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.749406 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.749510 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.749479 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.749647 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.749986 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.750426 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.750879 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.751021 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.751162 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.751395 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.752248 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.752590 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.752722 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.752734 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.754582 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.754617 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.754657 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:00.254640311 +0000 UTC m=+22.940650827 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.755507 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.755678 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.755968 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.756105 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.756219 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). 
InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.756506 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.756569 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.756664 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.756753 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:00.25672777 +0000 UTC m=+22.942738286 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.757219 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.757306 4948 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.757356 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.757485 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.757561 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.757889 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.757670 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.757677 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.758201 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.758387 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.758457 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.758625 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.758653 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.759886 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.761037 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.761737 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.762023 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.762286 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.762586 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.762983 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.763342 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.763386 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.765254 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.765348 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.765679 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.765707 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.765722 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.765798 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:00.265771974 +0000 UTC m=+22.951782490 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.766013 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.766647 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.767553 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.770285 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.770321 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.771458 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.771995 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.772799 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.774536 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.776755 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.776839 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.777382 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.777394 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.778154 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.778759 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.778764 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.778907 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.779543 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.779692 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.779633 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.779620 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.779891 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.780006 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.780068 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.780137 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.780591 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.781219 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.781150 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.781532 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.782024 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.782141 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.782221 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.782569 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.782869 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.782858 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.783157 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.783551 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.784200 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.784256 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.783855 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.784114 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.784279 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:46:59 crc kubenswrapper[4948]: E1122 04:46:59.784349 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:00.284327114 +0000 UTC m=+22.970337630 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.784212 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.784637 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.784762 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.784853 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.784909 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.784773 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.785809 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.786171 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.789582 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.790358 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.791100 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.791206 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.793646 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.794732 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.795353 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.795420 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.795559 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.795826 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.795892 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.797043 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.797789 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.799037 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.799571 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.800878 
4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.803326 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.804128 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.806102 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.807026 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.808083 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.809177 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.810011 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.810619 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.812259 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.813703 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.813923 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.814916 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.816351 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.817088 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.819652 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.821196 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.828850 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.830075 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.835636 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.839481 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.840566 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.841829 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.843325 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.843815 4948 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.844248 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.844340 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845240 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845267 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845279 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845298 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845335 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845350 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845361 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845378 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845388 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: 
\"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845397 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845407 4948 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845422 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845433 4948 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845445 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845455 4948 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845482 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845497 4948 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845507 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845519 4948 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845529 4948 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845539 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845549 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: 
\"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845648 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845669 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.845926 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.848977 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.849351 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.849399 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.849424 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.849437 4948 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.849450 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.849470 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.849985 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850309 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc 
kubenswrapper[4948]: I1122 04:46:59.850350 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850367 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850384 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850397 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850417 4948 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850430 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850442 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850457 4948 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850493 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850506 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850517 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850533 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850546 4948 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850558 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850930 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.850984 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851005 4948 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851024 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851036 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851048 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851060 4948 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851074 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851087 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851104 4948 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851118 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851132 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" 
(UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851143 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851155 4948 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851169 4948 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851180 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851208 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851233 4948 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851246 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851287 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851303 4948 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851315 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851326 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851341 4948 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851352 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851364 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851489 4948 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851502 4948 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851535 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851546 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851559 4948 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851603 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851615 4948 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851627 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851642 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851653 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851663 4948 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851673 4948 reconciler_common.go:293] "Volume detached for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851686 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851696 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851707 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851720 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851731 4948 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851742 4948 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851755 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851769 4948 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851781 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851791 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851804 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851817 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851827 4948 reconciler_common.go:293] "Volume detached for volume 
\"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851839 4948 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851849 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851863 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851874 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851884 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851896 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851908 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851917 4948 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851929 4948 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851941 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851952 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851962 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851972 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851986 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.851996 4948 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852008 4948 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852021 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852031 4948 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852041 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852054 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852067 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852076 4948 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852086 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852096 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852114 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852128 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: 
\"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852142 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852155 4948 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852168 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852178 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852187 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852199 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852213 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852223 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852233 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852246 4948 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852260 4948 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852270 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852281 4948 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on 
node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852305 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852317 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852327 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852339 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852349 4948 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852360 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852371 4948 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852384 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852395 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852405 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852417 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852435 4948 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852449 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: 
\"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852482 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852493 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852507 4948 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.852886 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.855715 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.855844 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.856809 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.858860 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.860439 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.860844 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.861340 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.862521 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.863311 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.864569 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.865375 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.872061 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.872931 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.873307 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.875636 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.877538 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.878187 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.879018 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.880164 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.881110 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.883227 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.883982 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.884067 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.893361 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.903382 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.918560 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.922174 4948 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:42590->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.922241 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:42590->192.168.126.11:17697: read: connection reset by peer" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.922264 4948 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": EOF" start-of-body= Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.922321 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": EOF" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.932394 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.946478 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.953811 4948 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.953845 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.988341 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 04:46:59 crc kubenswrapper[4948]: I1122 04:46:59.996629 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.006415 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 04:47:00 crc kubenswrapper[4948]: W1122 04:47:00.017908 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-3d958e2fe34afc5feb93eb917a1a08cbaba7b7be6280183ea05e008f03731105 WatchSource:0}: Error finding container 3d958e2fe34afc5feb93eb917a1a08cbaba7b7be6280183ea05e008f03731105: Status 404 returned error can't find the container with id 3d958e2fe34afc5feb93eb917a1a08cbaba7b7be6280183ea05e008f03731105 Nov 22 04:47:00 crc kubenswrapper[4948]: W1122 04:47:00.025996 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-e8f187901980cb5634883e25945116d237cac9e53326dd723e31da0f6aa98496 WatchSource:0}: Error finding container e8f187901980cb5634883e25945116d237cac9e53326dd723e31da0f6aa98496: Status 404 returned error can't find the container with id e8f187901980cb5634883e25945116d237cac9e53326dd723e31da0f6aa98496 Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.055095 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.056373 4948 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.056456 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.062561 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.073362 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.090139 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.101857 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.102898 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-xfvlb"] Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.103259 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-xfvlb" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.106247 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.109122 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.111881 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.113637 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.127834 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.138657 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.152975 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.164751 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.170093 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.179318 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.196263 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc 
kubenswrapper[4948]: I1122 04:47:00.234435 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.255592 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.255694 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/a357c8ff-8016-42b1-80b6-1ead105abc52-hosts-file\") pod \"node-resolver-xfvlb\" (UID: \"a357c8ff-8016-42b1-80b6-1ead105abc52\") " pod="openshift-dns/node-resolver-xfvlb" Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.255809 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-22 04:47:01.255762793 +0000 UTC m=+23.941773309 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.255876 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.255977 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlrsd\" (UniqueName: \"kubernetes.io/projected/a357c8ff-8016-42b1-80b6-1ead105abc52-kube-api-access-nlrsd\") pod \"node-resolver-xfvlb\" (UID: \"a357c8ff-8016-42b1-80b6-1ead105abc52\") " pod="openshift-dns/node-resolver-xfvlb" Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.256012 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.256097 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:01.256073072 +0000 UTC m=+23.942083588 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.266288 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.281625 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.356619 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.356694 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlrsd\" (UniqueName: \"kubernetes.io/projected/a357c8ff-8016-42b1-80b6-1ead105abc52-kube-api-access-nlrsd\") pod \"node-resolver-xfvlb\" (UID: \"a357c8ff-8016-42b1-80b6-1ead105abc52\") " pod="openshift-dns/node-resolver-xfvlb" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.356737 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.356770 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/a357c8ff-8016-42b1-80b6-1ead105abc52-hosts-file\") pod \"node-resolver-xfvlb\" (UID: \"a357c8ff-8016-42b1-80b6-1ead105abc52\") " pod="openshift-dns/node-resolver-xfvlb" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.356806 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.356897 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.357017 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.357043 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:01.357011517 +0000 UTC m=+24.043022033 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.357049 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.357082 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.357114 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.357126 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:01.35711756 +0000 UTC m=+24.043128276 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.357134 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.357155 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.357199 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/a357c8ff-8016-42b1-80b6-1ead105abc52-hosts-file\") pod \"node-resolver-xfvlb\" (UID: \"a357c8ff-8016-42b1-80b6-1ead105abc52\") " pod="openshift-dns/node-resolver-xfvlb" Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.357233 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:01.357207932 +0000 UTC m=+24.043218448 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.377540 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlrsd\" (UniqueName: \"kubernetes.io/projected/a357c8ff-8016-42b1-80b6-1ead105abc52-kube-api-access-nlrsd\") pod \"node-resolver-xfvlb\" (UID: \"a357c8ff-8016-42b1-80b6-1ead105abc52\") " pod="openshift-dns/node-resolver-xfvlb" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.420442 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-xfvlb" Nov 22 04:47:00 crc kubenswrapper[4948]: W1122 04:47:00.432902 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda357c8ff_8016_42b1_80b6_1ead105abc52.slice/crio-13f76f8c4b411bc91cace4c5775348cdf501f69b98c7bd71ad8ec9d6e5ae42d9 WatchSource:0}: Error finding container 13f76f8c4b411bc91cace4c5775348cdf501f69b98c7bd71ad8ec9d6e5ae42d9: Status 404 returned error can't find the container with id 13f76f8c4b411bc91cace4c5775348cdf501f69b98c7bd71ad8ec9d6e5ae42d9 Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.495946 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-pf8gx"] Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.496350 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-bspvz"] Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.496624 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.508896 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-mw95l"] Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.509133 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.509180 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.509214 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-kx6rn"] Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.509359 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.510774 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.510869 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.510789 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.511194 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.511446 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.514545 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.514857 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.515614 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.516618 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.523297 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.523695 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.524068 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.524267 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.524357 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.524549 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.524590 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.524762 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.524960 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.525145 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.541282 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.553842 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.568840 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.577157 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.591766 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:43Z\\\",\\\"message\\\":\\\"W1122 04:46:42.963332 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 
04:46:42.963883 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763786802 cert, and key in /tmp/serving-cert-3444382197/serving-signer.crt, /tmp/serving-cert-3444382197/serving-signer.key\\\\nI1122 04:46:43.251870 1 observer_polling.go:159] Starting file observer\\\\nW1122 04:46:43.259548 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 04:46:43.259818 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 04:46:43.261168 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3444382197/tls.crt::/tmp/serving-cert-3444382197/tls.key\\\\\\\"\\\\nF1122 04:46:43.633056 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\
" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.611587 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.622835 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.633713 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.645549 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.658792 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659076 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-systemd-units\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659173 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-systemd\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659255 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-cnibin\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659339 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-cnibin\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659418 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-conf-dir\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659515 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-etc-kubernetes\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659620 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-daemon-config\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " 
pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659727 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659897 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-script-lib\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.659981 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-os-release\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660068 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-run-k8s-cni-cncf-io\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660139 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-slash\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660208 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-netns\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660385 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-system-cni-dir\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660489 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-ovn-kubernetes\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660517 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-config\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660540 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660561 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660584 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-run-multus-certs\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660603 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-log-socket\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660625 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-cni-binary-copy\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660661 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-socket-dir-parent\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660683 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bad3107e-91a9-463d-b981-fb102616bdbe-ovn-node-metrics-cert\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660702 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-etc-openvswitch\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc 
kubenswrapper[4948]: I1122 04:47:00.660721 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-bin\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660743 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-env-overrides\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660762 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/126f010b-a640-4133-b63f-d2976da99215-rootfs\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660782 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-cni-dir\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660825 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/126f010b-a640-4133-b63f-d2976da99215-mcd-auth-proxy-config\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660845 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-system-cni-dir\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660865 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vkzw\" (UniqueName: \"kubernetes.io/projected/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-kube-api-access-9vkzw\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660890 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7a2e6333-2885-4eaf-a4b3-6613127e6375-cni-binary-copy\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660911 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-var-lib-kubelet\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660931 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-netd\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660953 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-run-netns\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660969 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-openvswitch\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.660992 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrcwm\" (UniqueName: \"kubernetes.io/projected/bad3107e-91a9-463d-b981-fb102616bdbe-kube-api-access-hrcwm\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661026 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-var-lib-cni-multus\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661049 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnwvs\" (UniqueName: \"kubernetes.io/projected/7a2e6333-2885-4eaf-a4b3-6613127e6375-kube-api-access-mnwvs\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661071 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-node-log\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661093 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phk4m\" (UniqueName: \"kubernetes.io/projected/126f010b-a640-4133-b63f-d2976da99215-kube-api-access-phk4m\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661110 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-os-release\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661143 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-var-lib-cni-bin\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661160 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-hostroot\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661184 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-var-lib-openvswitch\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661210 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-kubelet\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661226 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-ovn\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.661243 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/126f010b-a640-4133-b63f-d2976da99215-proxy-tls\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.671320 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.682170 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.694918 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:43Z\\\",\\\"message\\\":\\\"W1122 04:46:42.963332 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 
04:46:42.963883 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763786802 cert, and key in /tmp/serving-cert-3444382197/serving-signer.crt, /tmp/serving-cert-3444382197/serving-signer.key\\\\nI1122 04:46:43.251870 1 observer_polling.go:159] Starting file observer\\\\nW1122 04:46:43.259548 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 04:46:43.259818 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 04:46:43.261168 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3444382197/tls.crt::/tmp/serving-cert-3444382197/tls.key\\\\\\\"\\\\nF1122 04:46:43.633056 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\
" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.705235 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.716582 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.725627 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.743406 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd 
nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"moun
tPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\
\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.757884 4948 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\
\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762247 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-run-netns\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762387 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-openvswitch\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762499 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-netd\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762607 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-netd\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762404 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-run-netns\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762617 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrcwm\" (UniqueName: \"kubernetes.io/projected/bad3107e-91a9-463d-b981-fb102616bdbe-kube-api-access-hrcwm\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762559 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-openvswitch\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762786 4948 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-var-lib-cni-multus\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762815 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnwvs\" (UniqueName: \"kubernetes.io/projected/7a2e6333-2885-4eaf-a4b3-6613127e6375-kube-api-access-mnwvs\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762834 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-node-log\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.762876 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-var-lib-cni-multus\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763065 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-node-log\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763190 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-var-lib-cni-bin\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763264 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-var-lib-cni-bin\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763302 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-hostroot\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763328 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phk4m\" (UniqueName: \"kubernetes.io/projected/126f010b-a640-4133-b63f-d2976da99215-kube-api-access-phk4m\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763330 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-hostroot\") pod \"multus-mw95l\" 
(UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763364 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-os-release\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763392 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-var-lib-openvswitch\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763454 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-kubelet\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763555 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-ovn\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763589 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-ovn\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763605 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/126f010b-a640-4133-b63f-d2976da99215-proxy-tls\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763541 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-os-release\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763513 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-kubelet\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763495 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-var-lib-openvswitch\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 
04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763733 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-cnibin\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763760 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-conf-dir\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763774 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-cnibin\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763780 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-etc-kubernetes\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763802 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-systemd-units\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763803 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-conf-dir\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763820 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-systemd\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763845 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-cnibin\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763852 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-etc-kubernetes\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763865 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763898 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-cnibin\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763909 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-daemon-config\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763845 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-systemd-units\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763914 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763936 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-script-lib\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763869 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-systemd\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763985 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-netns\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.763957 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-netns\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764109 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-system-cni-dir\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764132 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-os-release\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764153 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-run-k8s-cni-cncf-io\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764176 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-slash\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764194 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-ovn-kubernetes\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764204 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-system-cni-dir\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764212 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-config\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764236 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764256 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764278 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-os-release\") pod \"multus-mw95l\" (UID: 
\"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764279 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-run-multus-certs\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764308 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-run-multus-certs\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764314 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-log-socket\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764376 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-cni-binary-copy\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764421 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-socket-dir-parent\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764441 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bad3107e-91a9-463d-b981-fb102616bdbe-ovn-node-metrics-cert\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764476 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-etc-openvswitch\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764495 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-bin\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764515 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-env-overrides\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 
04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764535 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/126f010b-a640-4133-b63f-d2976da99215-rootfs\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764554 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-cni-dir\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764572 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/126f010b-a640-4133-b63f-d2976da99215-mcd-auth-proxy-config\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764593 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7a2e6333-2885-4eaf-a4b3-6613127e6375-cni-binary-copy\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764611 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-system-cni-dir\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764628 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vkzw\" (UniqueName: \"kubernetes.io/projected/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-kube-api-access-9vkzw\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764645 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-var-lib-kubelet\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764760 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-var-lib-kubelet\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764763 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-script-lib\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 
04:47:00.764775 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-daemon-config\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764810 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-bin\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764239 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-host-run-k8s-cni-cncf-io\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764831 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-slash\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764874 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-ovn-kubernetes\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764902 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-config\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765042 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-socket-dir-parent\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.764330 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-log-socket\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765379 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765520 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/126f010b-a640-4133-b63f-d2976da99215-mcd-auth-proxy-config\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765556 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-cni-binary-copy\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765598 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-system-cni-dir\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765659 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765665 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-etc-openvswitch\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765798 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/126f010b-a640-4133-b63f-d2976da99215-rootfs\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765890 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7a2e6333-2885-4eaf-a4b3-6613127e6375-multus-cni-dir\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.765952 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-env-overrides\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.766072 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7a2e6333-2885-4eaf-a4b3-6613127e6375-cni-binary-copy\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.769372 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/bad3107e-91a9-463d-b981-fb102616bdbe-ovn-node-metrics-cert\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.769725 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/126f010b-a640-4133-b63f-d2976da99215-proxy-tls\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.774553 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.785266 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vkzw\" (UniqueName: \"kubernetes.io/projected/720ed0a4-d93b-4f64-88f7-dfd7b218adc4-kube-api-access-9vkzw\") pod \"multus-additional-cni-plugins-kx6rn\" (UID: \"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\") " pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.785240 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.785548 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phk4m\" (UniqueName: \"kubernetes.io/projected/126f010b-a640-4133-b63f-d2976da99215-kube-api-access-phk4m\") pod \"machine-config-daemon-pf8gx\" (UID: \"126f010b-a640-4133-b63f-d2976da99215\") " pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.785880 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrcwm\" (UniqueName: \"kubernetes.io/projected/bad3107e-91a9-463d-b981-fb102616bdbe-kube-api-access-hrcwm\") pod \"ovnkube-node-bspvz\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.787000 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnwvs\" (UniqueName: \"kubernetes.io/projected/7a2e6333-2885-4eaf-a4b3-6613127e6375-kube-api-access-mnwvs\") pod \"multus-mw95l\" (UID: \"7a2e6333-2885-4eaf-a4b3-6613127e6375\") " pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.797842 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.827137 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.845973 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.857854 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-mw95l" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.863785 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" Nov 22 04:47:00 crc kubenswrapper[4948]: W1122 04:47:00.886017 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod720ed0a4_d93b_4f64_88f7_dfd7b218adc4.slice/crio-aa3b29926105d632f2753505b3929d28c61700283badae54580aac7adbb61d27 WatchSource:0}: Error finding container aa3b29926105d632f2753505b3929d28c61700283badae54580aac7adbb61d27: Status 404 returned error can't find the container with id aa3b29926105d632f2753505b3929d28c61700283badae54580aac7adbb61d27 Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.910290 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-xfvlb" event={"ID":"a357c8ff-8016-42b1-80b6-1ead105abc52","Type":"ContainerStarted","Data":"88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.910342 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-xfvlb" event={"ID":"a357c8ff-8016-42b1-80b6-1ead105abc52","Type":"ContainerStarted","Data":"13f76f8c4b411bc91cace4c5775348cdf501f69b98c7bd71ad8ec9d6e5ae42d9"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.919170 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.919218 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.919228 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"e8f187901980cb5634883e25945116d237cac9e53326dd723e31da0f6aa98496"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.926823 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"79f8755fb0c62062a806841d050a54b08a589d59dd2570be78e6ca8ce284937c"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.930856 4948 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:43Z\\\",\\\"message\\\":\\\"W1122 04:46:42.963332 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 04:46:42.963883 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763786802 cert, and key in /tmp/serving-cert-3444382197/serving-signer.crt, /tmp/serving-cert-3444382197/serving-signer.key\\\\nI1122 04:46:43.251870 1 observer_polling.go:159] Starting file observer\\\\nW1122 04:46:43.259548 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 04:46:43.259818 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 04:46:43.261168 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3444382197/tls.crt::/tmp/serving-cert-3444382197/tls.key\\\\\\\"\\\\nF1122 04:46:43.633056 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.933814 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mw95l" event={"ID":"7a2e6333-2885-4eaf-a4b3-6613127e6375","Type":"ContainerStarted","Data":"a2e1205a3829f50642b6c86a83dc1b7dda0ca449847c8ac41305b98702c16d4f"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.935129 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"3d958e2fe34afc5feb93eb917a1a08cbaba7b7be6280183ea05e008f03731105"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.945530 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.946951 4948 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.951270 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea" exitCode=255 Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.951383 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.951455 4948 scope.go:117] "RemoveContainer" containerID="fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.952062 4948 scope.go:117] "RemoveContainer" containerID="4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea" Nov 22 04:47:00 crc kubenswrapper[4948]: E1122 04:47:00.952331 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.956275 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerStarted","Data":"aa3b29926105d632f2753505b3929d28c61700283badae54580aac7adbb61d27"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.958555 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:00Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.958923 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"4f8eaf05b8c804884443cc00633061cb268ec71a5495302043a55b4d4987f56f"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.960841 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.960894 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e47311878483b863648233f8d981dcd7dcb15d042c214da138227c0442bf574f"} Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.976608 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:00Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:00 crc kubenswrapper[4948]: I1122 04:47:00.991193 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:00Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.010444 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers 
with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\
"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name
\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.026716 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\
",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\
\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.041824 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.055640 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.074059 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.091426 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.112854 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.127358 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.150738 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:43Z\\\",\\\"message\\\":\\\"W1122 04:46:42.963332 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 
04:46:42.963883 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763786802 cert, and key in /tmp/serving-cert-3444382197/serving-signer.crt, /tmp/serving-cert-3444382197/serving-signer.key\\\\nI1122 04:46:43.251870 1 observer_polling.go:159] Starting file observer\\\\nW1122 04:46:43.259548 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 04:46:43.259818 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 04:46:43.261168 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3444382197/tls.crt::/tmp/serving-cert-3444382197/tls.key\\\\\\\"\\\\nF1122 04:46:43.633056 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 
genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.204125 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.223216 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.242743 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.264810 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers 
with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\
"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name
\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.272026 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.272207 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.272329 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.272389 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:47:03.272341741 +0000 UTC m=+25.958352257 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.272491 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:03.272478745 +0000 UTC m=+25.958489401 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.280919 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.297454 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.308580 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.321338 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.351617 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.373498 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.373541 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.373573 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373671 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373688 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373700 4948 projected.go:194] Error 
preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373734 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:03.373723108 +0000 UTC m=+26.059733624 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373781 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373789 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373797 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373826 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:03.373819731 +0000 UTC m=+26.059830247 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373871 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.373961 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:03.373942694 +0000 UTC m=+26.059953200 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.390019 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.429784 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.678038 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.682579 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.689949 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.697705 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.711885 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.725255 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 
04:47:01.746348 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,
\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\
"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.758115 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.758167 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.758215 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.758308 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.758429 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.758615 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.763613 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.764810 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.765717 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.766704 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.766978 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.789412 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:43Z\\\",\\\"message\\\":\\\"W1122 04:46:42.963332 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 04:46:42.963883 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763786802 cert, and key in /tmp/serving-cert-3444382197/serving-signer.crt, /tmp/serving-cert-3444382197/serving-signer.key\\\\nI1122 04:46:43.251870 1 observer_polling.go:159] Starting file observer\\\\nW1122 04:46:43.259548 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 04:46:43.259818 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 04:46:43.261168 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3444382197/tls.crt::/tmp/serving-cert-3444382197/tls.key\\\\\\\"\\\\nF1122 04:46:43.633056 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" 
[serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.808561 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.823113 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.836360 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.853594 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.897233 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.934400 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.970009 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4"} Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.970076 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58"} Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.971652 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mw95l" event={"ID":"7a2e6333-2885-4eaf-a4b3-6613127e6375","Type":"ContainerStarted","Data":"52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e"} Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.973432 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.975915 4948 scope.go:117] "RemoveContainer" containerID="4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea" Nov 22 04:47:01 crc kubenswrapper[4948]: E1122 04:47:01.976105 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.976996 4948 generic.go:334] "Generic (PLEG): container finished" podID="720ed0a4-d93b-4f64-88f7-dfd7b218adc4" containerID="a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843" exitCode=0 Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.977116 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerDied","Data":"a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843"} Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.978822 4948 generic.go:334] "Generic (PLEG): container finished" 
podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6" exitCode=0 Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.978862 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"} Nov 22 04:47:01 crc kubenswrapper[4948]: I1122 04:47:01.978985 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:01Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.018038 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-vthsw"] Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.018595 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.023503 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.023972 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/op
enshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcc4fb85620dc4a27c17800503a918c967d16f06803d849639fecd4aa3475741\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:43Z\\\",\\\"message\\\":\\\"W1122 04:46:42.963332 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 04:46:42.963883 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763786802 cert, and key in /tmp/serving-cert-3444382197/serving-signer.crt, /tmp/serving-cert-3444382197/serving-signer.key\\\\nI1122 04:46:43.251870 1 observer_polling.go:159] Starting file observer\\\\nW1122 04:46:43.259548 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 04:46:43.259818 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 04:46:43.261168 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3444382197/tls.crt::/tmp/serving-cert-3444382197/tls.key\\\\\\\"\\\\nF1122 04:46:43.633056 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 
04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":
[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.042899 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.062042 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.080919 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.138084 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc 
kubenswrapper[4948]: I1122 04:47:02.175545 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.182523 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ad55002c-24cf-45f1-b251-f69c822a8d87-serviceca\") pod 
\"node-ca-vthsw\" (UID: \"ad55002c-24cf-45f1-b251-f69c822a8d87\") " pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.182571 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvsd6\" (UniqueName: \"kubernetes.io/projected/ad55002c-24cf-45f1-b251-f69c822a8d87-kube-api-access-zvsd6\") pod \"node-ca-vthsw\" (UID: \"ad55002c-24cf-45f1-b251-f69c822a8d87\") " pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.182607 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ad55002c-24cf-45f1-b251-f69c822a8d87-host\") pod \"node-ca-vthsw\" (UID: \"ad55002c-24cf-45f1-b251-f69c822a8d87\") " pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.212392 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 
22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.256490 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\
"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubern
etes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.283560 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvsd6\" (UniqueName: \"kubernetes.io/projected/ad55002c-24cf-45f1-b251-f69c822a8d87-kube-api-access-zvsd6\") pod \"node-ca-vthsw\" (UID: \"ad55002c-24cf-45f1-b251-f69c822a8d87\") " pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.283618 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ad55002c-24cf-45f1-b251-f69c822a8d87-host\") pod \"node-ca-vthsw\" (UID: \"ad55002c-24cf-45f1-b251-f69c822a8d87\") " pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.283681 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ad55002c-24cf-45f1-b251-f69c822a8d87-serviceca\") pod \"node-ca-vthsw\" (UID: \"ad55002c-24cf-45f1-b251-f69c822a8d87\") " pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.284574 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ad55002c-24cf-45f1-b251-f69c822a8d87-serviceca\") pod \"node-ca-vthsw\" (UID: \"ad55002c-24cf-45f1-b251-f69c822a8d87\") " pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.284708 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ad55002c-24cf-45f1-b251-f69c822a8d87-host\") pod \"node-ca-vthsw\" (UID: \"ad55002c-24cf-45f1-b251-f69c822a8d87\") " pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.292002 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.324789 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvsd6\" (UniqueName: \"kubernetes.io/projected/ad55002c-24cf-45f1-b251-f69c822a8d87-kube-api-access-zvsd6\") pod \"node-ca-vthsw\" (UID: \"ad55002c-24cf-45f1-b251-f69c822a8d87\") " pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.369127 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-vthsw" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.369838 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",
\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: W1122 04:47:02.386779 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad55002c_24cf_45f1_b251_f69c822a8d87.slice/crio-7d5463ab4d72c9ccdf31fad81a2b0539f47efa4a1cf09bcca767ca352e59223e WatchSource:0}: Error finding container 7d5463ab4d72c9ccdf31fad81a2b0539f47efa4a1cf09bcca767ca352e59223e: Status 404 returned error can't find the container with id 7d5463ab4d72c9ccdf31fad81a2b0539f47efa4a1cf09bcca767ca352e59223e Nov 22 04:47:02 crc kubenswrapper[4948]: 
I1122 04:47:02.404984 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.432088 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.477106 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.515543 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.551812 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.591673 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.629603 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.672578 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.711256 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready 
status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.754344 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.789669 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.834514 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.877362 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc 
kubenswrapper[4948]: I1122 04:47:02.911090 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.952087 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.988200 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerStarted","Data":"59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb"} Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.990900 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.993757 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"} Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.993795 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"} Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.993808 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"} Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.993820 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"} Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.996603 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918"} Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.998555 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vthsw" event={"ID":"ad55002c-24cf-45f1-b251-f69c822a8d87","Type":"ContainerStarted","Data":"a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1"} Nov 22 04:47:02 crc kubenswrapper[4948]: I1122 04:47:02.998640 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vthsw" 
event={"ID":"ad55002c-24cf-45f1-b251-f69c822a8d87","Type":"ContainerStarted","Data":"7d5463ab4d72c9ccdf31fad81a2b0539f47efa4a1cf09bcca767ca352e59223e"} Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.036914 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.075887 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.113096 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.148639 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.192103 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.229701 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.272806 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.292733 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.292846 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.292914 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.292970 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:07.292956893 +0000 UTC m=+29.978967409 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.293025 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:47:07.292980034 +0000 UTC m=+29.978990580 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.309610 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.357442 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.393864 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.393988 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.394038 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394208 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394282 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394327 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394350 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394298 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:07.394274458 +0000 UTC m=+30.080285014 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394448 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:07.394427673 +0000 UTC m=+30.080438199 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394281 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394503 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394517 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.394555 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:07.394545646 +0000 UTC m=+30.080556172 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.399385 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.430195 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.476051 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.515519 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.554432 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-c
ni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.594239 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.638235 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.669557 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:03Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.757865 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.757974 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.758050 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:03 crc kubenswrapper[4948]: I1122 04:47:03.758073 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.758198 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:03 crc kubenswrapper[4948]: E1122 04:47:03.758304 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.012028 4948 generic.go:334] "Generic (PLEG): container finished" podID="720ed0a4-d93b-4f64-88f7-dfd7b218adc4" containerID="59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb" exitCode=0 Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.012127 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerDied","Data":"59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb"} Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.020029 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"} Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.020146 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"} Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.048659 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.075064 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from 
k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.098090 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.116125 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.128340 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.148672 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.168514 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.182618 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{
\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.196927 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.207068 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.222382 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.234928 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.245646 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:04 crc kubenswrapper[4948]: I1122 04:47:04.259569 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:04Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.026908 4948 generic.go:334] "Generic (PLEG): container finished" podID="720ed0a4-d93b-4f64-88f7-dfd7b218adc4" containerID="dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3" exitCode=0 Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.026978 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerDied","Data":"dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3"} Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.048124 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.065921 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.080143 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.107954 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-c
ni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.124812 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.141217 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.153639 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.169515 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.188488 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.205192 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.221952 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.236020 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.260841 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.283368 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.757133 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.757252 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:05 crc kubenswrapper[4948]: E1122 04:47:05.757319 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.757373 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:05 crc kubenswrapper[4948]: E1122 04:47:05.757617 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:05 crc kubenswrapper[4948]: E1122 04:47:05.757756 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.925342 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.928545 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.928587 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.928604 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.928691 4948 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.945879 4948 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.946141 4948 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.947627 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.947684 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.947700 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.947727 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.947743 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:05Z","lastTransitionTime":"2025-11-22T04:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:05 crc kubenswrapper[4948]: E1122 04:47:05.984621 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:05Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.991662 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.991732 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.991752 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.991781 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:05 crc kubenswrapper[4948]: I1122 04:47:05.991800 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:05Z","lastTransitionTime":"2025-11-22T04:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: E1122 04:47:06.012097 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.017020 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.017132 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.017207 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.017303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.017391 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.033175 4948 generic.go:334] "Generic (PLEG): container finished" podID="720ed0a4-d93b-4f64-88f7-dfd7b218adc4" containerID="e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247" exitCode=0 Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.033253 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerDied","Data":"e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247"} Nov 22 04:47:06 crc kubenswrapper[4948]: E1122 04:47:06.036505 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.041071 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.041113 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.041126 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.041144 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.041156 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.042163 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.048904 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc 
kubenswrapper[4948]: E1122 04:47:06.069745 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.071983 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.073627 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.073676 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.073689 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.073706 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.073716 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.081109 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: E1122 04:47:06.085828 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: E1122 04:47:06.085964 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.087498 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.087517 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.087526 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.087544 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.087557 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.101719 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a061
0ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.120423 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.135706 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.150988 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.164538 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.179012 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.189851 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.189894 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.189907 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.189929 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.189949 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.195390 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.214631 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.228252 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.244513 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.258352 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:06Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.294140 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.294209 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.294228 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.294252 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.294268 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.398267 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.398337 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.398354 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.398385 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.398411 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.502114 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.502180 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.502191 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.502212 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.502228 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.604948 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.605009 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.605030 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.605057 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.605077 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.708091 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.708134 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.708147 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.708170 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.708183 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.811906 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.811980 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.811993 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.812019 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.812036 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.914728 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.914776 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.914791 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.914812 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:06 crc kubenswrapper[4948]: I1122 04:47:06.914827 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:06Z","lastTransitionTime":"2025-11-22T04:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.018528 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.018588 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.018602 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.018624 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.018638 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.050038 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerStarted","Data":"3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.070326 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.087104 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.118421 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.121387 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.121421 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.121429 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.121447 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.121477 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.137271 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.155190 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.173210 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.194582 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.215171 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.226226 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.226288 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.226300 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.226321 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.226334 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.233324 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.256357 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.271049 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha
256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.286525 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.302695 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.318541 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:07Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.331035 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.331299 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.331312 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.331332 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.331345 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.366772 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.366873 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.367005 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.367076 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:47:15.36703668 +0000 UTC m=+38.053047216 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.367131 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:15.367118482 +0000 UTC m=+38.053128998 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.434841 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.434895 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.434907 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.434928 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.434942 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.467183 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.467245 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.467275 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.467393 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.467445 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:15.467430719 +0000 UTC m=+38.153441245 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.467766 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.467811 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.467835 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.467913 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:15.467886452 +0000 UTC m=+38.153897008 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.468020 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.468040 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.468056 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.468100 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:15.468086328 +0000 UTC m=+38.154096884 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.537887 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.537941 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.537953 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.537973 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.537986 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.641027 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.641099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.641144 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.641170 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.641190 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.743654 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.743699 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.743712 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.743731 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.743744 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.758008 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.758041 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.758213 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.758363 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.758444 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:07 crc kubenswrapper[4948]: E1122 04:47:07.758687 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.847795 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.847941 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.848019 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.848097 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.848193 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.951935 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.951995 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.952008 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.952030 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:07 crc kubenswrapper[4948]: I1122 04:47:07.952045 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:07Z","lastTransitionTime":"2025-11-22T04:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.055252 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.055295 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.055309 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.055333 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.055346 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.059400 4948 generic.go:334] "Generic (PLEG): container finished" podID="720ed0a4-d93b-4f64-88f7-dfd7b218adc4" containerID="3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17" exitCode=0 Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.059583 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerDied","Data":"3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.071016 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.072340 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.077815 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.092658 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.108450 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.112089 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.123760 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.141830 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.157877 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.157921 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.157933 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.157951 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.157962 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.177659 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.199234 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.224385 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.239822 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.252880 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.260375 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.260399 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.260407 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.260420 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.260428 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.265816 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.279257 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.293371 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.307766 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.320717 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.331043 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.346813 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.356546 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.357737 4948 scope.go:117] "RemoveContainer" containerID="4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea" Nov 22 04:47:08 crc kubenswrapper[4948]: E1122 04:47:08.358046 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.362883 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.362913 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.362924 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.362938 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.362950 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.365295 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.374343 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.385077 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.397498 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.408782 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.420893 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.438722 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-sock
et\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.455054 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.465516 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.465556 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.465567 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.465583 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.465593 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.468566 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting 
failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.483675 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.500601 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:08Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.567117 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.567152 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.567161 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.567174 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.567185 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.670386 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.670445 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.670484 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.670510 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.670527 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.772393 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.772453 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.772509 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.772535 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.772552 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.875051 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.875119 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.875139 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.875169 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.875186 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.978301 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.978346 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.978359 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.978386 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:08 crc kubenswrapper[4948]: I1122 04:47:08.978404 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:08Z","lastTransitionTime":"2025-11-22T04:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.078809 4948 generic.go:334] "Generic (PLEG): container finished" podID="720ed0a4-d93b-4f64-88f7-dfd7b218adc4" containerID="16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9" exitCode=0 Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.078896 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerDied","Data":"16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.079002 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.079582 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.083193 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.083224 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.083234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.083248 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.083260 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:09Z","lastTransitionTime":"2025-11-22T04:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.095526 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.108310 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.110294 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.131390 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.160151 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.176147 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.192513 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.192564 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.192577 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.192800 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.192815 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:09Z","lastTransitionTime":"2025-11-22T04:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.195444 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.217875 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.231718 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.251928 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.267244 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.283215 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.294980 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.295358 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.295380 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.295390 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.295410 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.295420 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:09Z","lastTransitionTime":"2025-11-22T04:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.309273 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.326303 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.339931 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.350277 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.370626 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.387609 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.397808 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.397856 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.397871 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.397891 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.397907 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:09Z","lastTransitionTime":"2025-11-22T04:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.402085 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting 
failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.414946 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.428436 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.442185 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.456190 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.471482 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-c
ni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.485592 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.495418 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.507234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.507272 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.507281 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.507296 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.507309 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:09Z","lastTransitionTime":"2025-11-22T04:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.512815 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.527830 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.610298 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.610347 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.610358 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.610378 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.610391 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:09Z","lastTransitionTime":"2025-11-22T04:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.716586 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.716654 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.716673 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.716703 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.716723 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:09Z","lastTransitionTime":"2025-11-22T04:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.757697 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:09 crc kubenswrapper[4948]: E1122 04:47:09.757855 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.757951 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.758050 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:09 crc kubenswrapper[4948]: E1122 04:47:09.758186 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:09 crc kubenswrapper[4948]: E1122 04:47:09.758277 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.782513 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\
\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting 
DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.795955 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.813154 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.826114 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.826166 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.826181 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.826204 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.826221 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:09Z","lastTransitionTime":"2025-11-22T04:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.827664 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.854708 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\
"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started
\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.872638 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.888235 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.904134 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.916702 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.929793 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-c
ni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.930784 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.930846 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.930871 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.930887 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.930897 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:09Z","lastTransitionTime":"2025-11-22T04:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.944172 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.958548 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.969701 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:09 crc kubenswrapper[4948]: I1122 04:47:09.979959 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:09Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.033479 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.033516 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.033524 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.033536 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.033545 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.086808 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.088064 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" event={"ID":"720ed0a4-d93b-4f64-88f7-dfd7b218adc4","Type":"ContainerStarted","Data":"4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436"} Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.102296 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\
\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.116744 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.127800 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.136356 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.136437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.136493 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.136527 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.136547 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.141506 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.158674 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.173248 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with 
unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.185710 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.200956 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.215844 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.231291 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.240552 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.240815 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.240906 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.241437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.241569 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.247244 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.265390 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.283706 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.307180 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:10Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.345315 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.345686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.347391 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.347544 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.347610 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.451005 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.451040 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.451048 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.451065 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.451076 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.554271 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.554314 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.554332 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.554353 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.554366 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.656868 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.656925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.656939 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.656961 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.656977 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.760241 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.760294 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.760304 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.760324 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.760338 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.863037 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.863118 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.863139 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.863167 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.863187 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.965898 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.966124 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.966290 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.966422 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:10 crc kubenswrapper[4948]: I1122 04:47:10.966535 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:10Z","lastTransitionTime":"2025-11-22T04:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.070876 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.070961 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.070983 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.071013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.071032 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.091716 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.174041 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.174104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.174121 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.174146 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.174165 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.276373 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.276449 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.276494 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.276523 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.276540 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.379737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.379807 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.379828 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.379858 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.379881 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.482639 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.482995 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.483015 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.483032 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.483043 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.585342 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.585377 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.585387 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.585401 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.585412 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.689102 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.689525 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.689597 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.689726 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.689799 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.758078 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.758158 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.758089 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:47:11 crc kubenswrapper[4948]: E1122 04:47:11.758259 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:47:11 crc kubenswrapper[4948]: E1122 04:47:11.758383 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 04:47:11 crc kubenswrapper[4948]: E1122 04:47:11.758585 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.793081 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.793131 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.793142 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.793160 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.793178 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.896223 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.896272 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.896284 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.896303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.896319 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.998685 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.998740 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.998753 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.998771 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:11 crc kubenswrapper[4948]: I1122 04:47:11.998782 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:11Z","lastTransitionTime":"2025-11-22T04:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.102092 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.102181 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.102204 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.102234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.102256 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:12Z","lastTransitionTime":"2025-11-22T04:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.205881 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.205944 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.205959 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.205983 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.205998 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:12Z","lastTransitionTime":"2025-11-22T04:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.310028 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.310089 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.310104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.312495 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.312521 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:12Z","lastTransitionTime":"2025-11-22T04:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.414896 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.415092 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.415169 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.415229 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.415282 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:12Z","lastTransitionTime":"2025-11-22T04:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.517833 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.517865 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.517873 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.517885 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.517895 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:12Z","lastTransitionTime":"2025-11-22T04:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.619671 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.619908 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.619967 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.620033 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.620093 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:12Z","lastTransitionTime":"2025-11-22T04:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.653942 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l"] Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.654507 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: W1122 04:47:12.656550 4948 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd": failed to list *v1.Secret: secrets "ovn-kubernetes-control-plane-dockercfg-gs7dd" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Nov 22 04:47:12 crc kubenswrapper[4948]: E1122 04:47:12.656631 4948 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-gs7dd\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-kubernetes-control-plane-dockercfg-gs7dd\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 04:47:12 crc kubenswrapper[4948]: W1122 04:47:12.657652 4948 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert": failed to list *v1.Secret: secrets "ovn-control-plane-metrics-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Nov 22 04:47:12 crc kubenswrapper[4948]: E1122 04:47:12.657691 4948 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-control-plane-metrics-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.675507 4948 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 
04:47:12.708132 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.730822 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.731037 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.731118 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.731174 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2l2pk\" (UniqueName: \"kubernetes.io/projected/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-kube-api-access-2l2pk\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.734084 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.734133 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.734149 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.734173 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.734187 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:12Z","lastTransitionTime":"2025-11-22T04:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.746791 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.761330 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.775297 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.795927 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.810496 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.826478 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.831901 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.832066 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.832319 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-2l2pk\" (UniqueName: \"kubernetes.io/projected/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-kube-api-access-2l2pk\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.832556 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.832860 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.833352 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.836694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.836774 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.836830 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.836887 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.836949 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:12Z","lastTransitionTime":"2025-11-22T04:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.857740 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.866077 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2l2pk\" (UniqueName: \"kubernetes.io/projected/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-kube-api-access-2l2pk\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.877966 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.890021 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.907933 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-c
ni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.919213 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.931912 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.939053 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.939094 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.939107 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.939127 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.939139 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:12Z","lastTransitionTime":"2025-11-22T04:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:12 crc kubenswrapper[4948]: I1122 04:47:12.941557 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:12Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.041794 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.041834 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.041843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.041857 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.041869 4948 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.144736 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.144800 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.144811 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.144829 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.144840 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.247150 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.247194 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.247207 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.247228 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.247247 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.349716 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.349774 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.349787 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.349806 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.349820 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.452638 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.452694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.452712 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.452734 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.452745 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.550234 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.556016 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.556089 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.556102 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.556127 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.556142 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.659162 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.659209 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.659248 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.659266 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.659303 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.757977 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:13 crc kubenswrapper[4948]: E1122 04:47:13.758345 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.758392 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:13 crc kubenswrapper[4948]: E1122 04:47:13.758568 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.758629 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:13 crc kubenswrapper[4948]: E1122 04:47:13.758699 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.762972 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.763001 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.763010 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.763620 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.763649 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:13 crc kubenswrapper[4948]: E1122 04:47:13.832593 4948 secret.go:188] Couldn't get secret openshift-ovn-kubernetes/ovn-control-plane-metrics-cert: failed to sync secret cache: timed out waiting for the condition Nov 22 04:47:13 crc kubenswrapper[4948]: E1122 04:47:13.832690 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-ovn-control-plane-metrics-cert podName:42ab8dc1-4daa-4eef-b2c7-9ffa774b8411 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:14.332670266 +0000 UTC m=+37.018680782 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "ovn-control-plane-metrics-cert" (UniqueName: "kubernetes.io/secret/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-ovn-control-plane-metrics-cert") pod "ovnkube-control-plane-749d76644c-7sg6l" (UID: "42ab8dc1-4daa-4eef-b2c7-9ffa774b8411") : failed to sync secret cache: timed out waiting for the condition Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.866413 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.866843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.867112 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.867340 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.867598 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.970932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.971172 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.971234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.971396 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:13 crc kubenswrapper[4948]: I1122 04:47:13.971495 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:13Z","lastTransitionTime":"2025-11-22T04:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.074767 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.075129 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.075271 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.075419 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.075643 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:14Z","lastTransitionTime":"2025-11-22T04:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.104390 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/0.log" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.110577 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f" exitCode=1 Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.110661 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.111676 4948 scope.go:117] "RemoveContainer" containerID="0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.135799 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.160088 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.181870 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-btkdx"] Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.182005 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.182511 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:14 crc kubenswrapper[4948]: E1122 04:47:14.183652 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.185414 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.185480 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.185492 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.185513 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.185527 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:14Z","lastTransitionTime":"2025-11-22T04:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.202114 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.218137 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.229221 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.237953 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.252418 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b52vx\" (UniqueName: \"kubernetes.io/projected/9a35ebfd-12d4-4129-9c61-9d5880130fa0-kube-api-access-b52vx\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 
04:47:14.252643 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.253901 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.285612 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:13Z\\\",\\\"message\\\":\\\":13.693509 6241 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.693980 6241 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694126 6241 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694637 6241 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:13.694709 6241 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:13.694739 6241 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 04:47:13.694752 6241 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 04:47:13.694791 6241 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:13.694898 6241 factory.go:656] Stopping watch factory\\\\nI1122 04:47:13.694928 6241 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:13.694992 6241 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:13.695010 6241 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:13.695023 6241 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 04:47:13.695038 6241 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.288994 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.289033 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.289048 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.289068 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.289083 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:14Z","lastTransitionTime":"2025-11-22T04:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.307231 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.324250 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.342018 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.353981 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.354060 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b52vx\" (UniqueName: \"kubernetes.io/projected/9a35ebfd-12d4-4129-9c61-9d5880130fa0-kube-api-access-b52vx\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.354117 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:14 crc kubenswrapper[4948]: E1122 04:47:14.354289 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:14 crc kubenswrapper[4948]: E1122 04:47:14.354372 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs podName:9a35ebfd-12d4-4129-9c61-9d5880130fa0 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:14.854349436 +0000 UTC m=+37.540359992 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs") pod "network-metrics-daemon-btkdx" (UID: "9a35ebfd-12d4-4129-9c61-9d5880130fa0") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.357833 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04
:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.359169 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/42ab8dc1-4daa-4eef-b2c7-9ffa774b8411-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7sg6l\" (UID: \"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.378361 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b52vx\" (UniqueName: \"kubernetes.io/projected/9a35ebfd-12d4-4129-9c61-9d5880130fa0-kube-api-access-b52vx\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.380768 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.391692 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.391737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.391750 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.391768 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.391780 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:14Z","lastTransitionTime":"2025-11-22T04:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.393504 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.405588 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.418070 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.431005 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.443380 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.460154 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.471492 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.475120 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.490750 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.494425 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.494479 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.494491 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.494508 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.494520 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:14Z","lastTransitionTime":"2025-11-22T04:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.502020 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.519169 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\
"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started
\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:13Z\\\",\\\"message\\\":\\\":13.693509 6241 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.693980 6241 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694126 6241 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694637 6241 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:13.694709 6241 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:13.694739 6241 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 04:47:13.694752 6241 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 04:47:13.694791 6241 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:13.694898 6241 factory.go:656] Stopping watch factory\\\\nI1122 04:47:13.694928 6241 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:13.694992 6241 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:13.695010 6241 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:13.695023 6241 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 04:47:13.695038 6241 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.537748 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.564352 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\
\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.577270 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.593079 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.597251 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.597289 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.597301 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.597318 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.597327 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:14Z","lastTransitionTime":"2025-11-22T04:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.607983 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.620989 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.634171 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.643754 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:14Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.699211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.699240 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.699249 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.699261 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.699269 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:14Z","lastTransitionTime":"2025-11-22T04:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.803645 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.803684 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.803695 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.803713 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.803725 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:14Z","lastTransitionTime":"2025-11-22T04:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.859889 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:14 crc kubenswrapper[4948]: E1122 04:47:14.860116 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:14 crc kubenswrapper[4948]: E1122 04:47:14.860216 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs podName:9a35ebfd-12d4-4129-9c61-9d5880130fa0 nodeName:}" failed. 
No retries permitted until 2025-11-22 04:47:15.860196421 +0000 UTC m=+38.546206947 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs") pod "network-metrics-daemon-btkdx" (UID: "9a35ebfd-12d4-4129-9c61-9d5880130fa0") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.906229 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.906263 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.906272 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.906285 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:14 crc kubenswrapper[4948]: I1122 04:47:14.906297 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:14Z","lastTransitionTime":"2025-11-22T04:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.009252 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.009580 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.009603 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.009657 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.009673 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.156945 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.157198 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.157487 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.157617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.157692 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.159099 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" event={"ID":"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411","Type":"ContainerStarted","Data":"4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.159371 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" event={"ID":"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411","Type":"ContainerStarted","Data":"a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.159491 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" event={"ID":"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411","Type":"ContainerStarted","Data":"fe675d413d805f4f68c5d08dd69e734b8dc076a1bf3523f7b7ddbead3cf39aeb"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.161694 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/0.log" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.164888 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.164984 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.175518 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.193287 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.208111 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.227324 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.241324 4948 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.251636 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.261847 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc 
kubenswrapper[4948]: I1122 04:47:15.262085 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.262101 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.262122 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.262133 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.265086 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.283257 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:13Z\\\",\\\"message\\\":\\\":13.693509 6241 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.693980 6241 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694126 6241 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694637 6241 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:13.694709 6241 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:13.694739 6241 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 04:47:13.694752 6241 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 04:47:13.694791 6241 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:13.694898 6241 factory.go:656] Stopping watch factory\\\\nI1122 04:47:13.694928 6241 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:13.694992 6241 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:13.695010 6241 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:13.695023 6241 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 04:47:13.695038 6241 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.297902 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.310553 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.321272 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.336251 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.350276 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.363278 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.364550 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.364704 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.364820 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.364922 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.365014 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.378191 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.396592 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.410992 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.428206 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.443681 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.457941 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-c
ni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.465137 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.465280 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.465456 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:47:31.465375236 +0000 UTC m=+54.151385792 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.465572 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.465718 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:31.465681995 +0000 UTC m=+54.151692571 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.467694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.467736 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.467750 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.467767 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.467780 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.474844 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.490436 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.504063 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.518934 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.531535 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 
04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.545229 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.566155 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod 
\"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.566258 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.566354 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566430 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566506 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566528 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566601 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566622 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566666 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566690 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566604 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:31.566580928 +0000 UTC m=+54.252591484 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566793 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:31.566764203 +0000 UTC m=+54.252774809 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.566823 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:31.566806275 +0000 UTC m=+54.252816901 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.566617 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.570513 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.570606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.570631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.570659 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.570677 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.588367 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.603703 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.617405 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.639413 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:13Z\\\",\\\"message\\\":\\\":13.693509 6241 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.693980 6241 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694126 6241 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694637 6241 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:13.694709 6241 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:13.694739 6241 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 04:47:13.694752 6241 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 04:47:13.694791 6241 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:13.694898 6241 factory.go:656] Stopping watch factory\\\\nI1122 04:47:13.694928 6241 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:13.694992 6241 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:13.695010 6241 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:13.695023 6241 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 04:47:13.695038 6241 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.659357 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:15Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.673742 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.674293 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc 
kubenswrapper[4948]: I1122 04:47:15.674385 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.674484 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.674675 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.757373 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.757419 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.757523 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.757642 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.757679 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.757729 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.757834 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.757959 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.776848 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.776887 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.776900 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.776918 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.776930 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.868783 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.869020 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: E1122 04:47:15.869151 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs podName:9a35ebfd-12d4-4129-9c61-9d5880130fa0 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:17.869122424 +0000 UTC m=+40.555132980 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs") pod "network-metrics-daemon-btkdx" (UID: "9a35ebfd-12d4-4129-9c61-9d5880130fa0") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.880414 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.880502 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.880527 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.880553 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.880574 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.984148 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.984193 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.984205 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.984231 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:15 crc kubenswrapper[4948]: I1122 04:47:15.984244 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:15Z","lastTransitionTime":"2025-11-22T04:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.087434 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.087488 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.087501 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.087521 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.087539 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.172603 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/1.log" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.175254 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/0.log" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.178757 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345" exitCode=1 Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.179040 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.179126 4948 scope.go:117] "RemoveContainer" containerID="0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.180440 4948 scope.go:117] "RemoveContainer" containerID="fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345" Nov 22 04:47:16 crc kubenswrapper[4948]: E1122 04:47:16.180729 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.194648 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.194705 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.194726 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.194752 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.194772 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.201736 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.214423 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 
04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.226803 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.246970 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.266366 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.282767 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.296684 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.296786 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.296954 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.296997 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.297056 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.297073 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.307897 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.326446 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\
"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started
\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:13Z\\\",\\\"message\\\":\\\":13.693509 6241 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.693980 6241 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694126 6241 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694637 6241 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:13.694709 6241 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:13.694739 6241 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 04:47:13.694752 6241 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 04:47:13.694791 6241 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:13.694898 6241 factory.go:656] Stopping watch factory\\\\nI1122 04:47:13.694928 6241 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:13.694992 6241 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:13.695010 6241 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:13.695023 6241 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 04:47:13.695038 6241 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"message\\\":\\\"Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:15.254435 6424 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:15.254487 
6424 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:15.254493 6424 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:15.254514 6424 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:15.254563 6424 factory.go:656] Stopping watch factory\\\\nI1122 04:47:15.254588 6424 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:15.254596 6424 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:15.254602 6424 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:15.254608 6424 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:15.254614 6424 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:15.254620 6424 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:15.254690 6424 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 04:47:15.254791 6424 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 04:47:15.254822 6424 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:15.254842 6424 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 04:47:15.254928 6424 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41a
c2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.339704 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.351776 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.399565 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.399608 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.399617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.399631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.399642 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.400919 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.400958 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.401001 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.401018 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.401030 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.401861 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.417229 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: E1122 04:47:16.422225 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.425696 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.425727 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.425741 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.425760 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.425776 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.442691 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: E1122 04:47:16.444346 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.448293 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.448324 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.448335 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.448371 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.448384 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.458232 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: E1122 04:47:16.462664 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.466535 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.466577 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.466591 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.466610 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.466624 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.471286 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: E1122 04:47:16.479920 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.484060 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.494310 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.494321 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.494335 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.494346 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: E1122 04:47:16.508645 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:16Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:16 crc kubenswrapper[4948]: E1122 04:47:16.508761 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.527817 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.527864 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.527872 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.527886 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.527895 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.630309 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.630361 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.630378 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.630400 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.630418 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.733816 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.733892 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.733917 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.733942 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.733960 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.837040 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.837078 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.837087 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.837104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.837116 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.940829 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.940897 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.940911 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.940930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:16 crc kubenswrapper[4948]: I1122 04:47:16.940943 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:16Z","lastTransitionTime":"2025-11-22T04:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.043972 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.044022 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.044056 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.044073 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.044085 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.146630 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.146696 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.146712 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.146730 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.146742 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.184107 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/1.log" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.249314 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.249362 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.249373 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.249392 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.249406 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.351935 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.352042 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.352061 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.352087 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.352104 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.455434 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.455540 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.455558 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.455583 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.455600 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.559656 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.559726 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.559743 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.559777 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.559795 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.663198 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.663294 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.663334 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.663371 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.663410 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.761645 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.761778 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:17 crc kubenswrapper[4948]: E1122 04:47:17.761822 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.761888 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:17 crc kubenswrapper[4948]: E1122 04:47:17.762074 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.762144 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:17 crc kubenswrapper[4948]: E1122 04:47:17.762279 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:17 crc kubenswrapper[4948]: E1122 04:47:17.762393 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.766410 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.766570 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.766593 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.766617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.766676 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.868998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.869031 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.869039 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.869052 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.869061 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.892112 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:17 crc kubenswrapper[4948]: E1122 04:47:17.892313 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:17 crc kubenswrapper[4948]: E1122 04:47:17.892379 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs podName:9a35ebfd-12d4-4129-9c61-9d5880130fa0 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:21.892361349 +0000 UTC m=+44.578371865 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs") pod "network-metrics-daemon-btkdx" (UID: "9a35ebfd-12d4-4129-9c61-9d5880130fa0") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.972033 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.972104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.972122 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.972620 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:17 crc kubenswrapper[4948]: I1122 04:47:17.972676 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:17Z","lastTransitionTime":"2025-11-22T04:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.075970 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.076044 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.076063 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.076088 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.076108 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:18Z","lastTransitionTime":"2025-11-22T04:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.179191 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.179222 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.179231 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.179246 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.179255 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:18Z","lastTransitionTime":"2025-11-22T04:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.283523 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.283595 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.283615 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.283649 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.283676 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:18Z","lastTransitionTime":"2025-11-22T04:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.387709 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.387877 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.387908 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.387942 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.387966 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:18Z","lastTransitionTime":"2025-11-22T04:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.491959 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.492081 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.492107 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.492139 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.492165 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:18Z","lastTransitionTime":"2025-11-22T04:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.595458 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.595569 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.595598 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.595641 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.595666 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:18Z","lastTransitionTime":"2025-11-22T04:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.699411 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.699506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.699530 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.699559 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.699581 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:18Z","lastTransitionTime":"2025-11-22T04:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.803932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.804004 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.804022 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.804045 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.804063 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:18Z","lastTransitionTime":"2025-11-22T04:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.916076 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.916133 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.916151 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.916176 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:18 crc kubenswrapper[4948]: I1122 04:47:18.916193 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:18Z","lastTransitionTime":"2025-11-22T04:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.019411 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.019501 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.019519 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.019550 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.019567 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.123082 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.123500 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.123658 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.123852 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.124025 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.227204 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.227278 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.227304 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.227333 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.227357 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.330840 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.330899 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.330915 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.330939 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.330957 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.434612 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.435207 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.435375 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.435622 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.435793 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.538588 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.538734 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.538795 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.538821 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.538838 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.642019 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.642082 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.642100 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.642124 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.642141 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.746578 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.746640 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.746656 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.746679 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.746696 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.758006 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.758096 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.758149 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx"
Nov 22 04:47:19 crc kubenswrapper[4948]: E1122 04:47:19.758197 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.758019 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:47:19 crc kubenswrapper[4948]: E1122 04:47:19.758433 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0"
Nov 22 04:47:19 crc kubenswrapper[4948]: E1122 04:47:19.758709 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:47:19 crc kubenswrapper[4948]: E1122 04:47:19.758864 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.778197 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.792239 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 
04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.808665 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.827822 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.845514 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.848790 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.848846 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.848873 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.848904 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.848927 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.861951 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.878308 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.891821 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.913215 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0985f6868495473e03ecb191846b8b9120985f0c00ad2dbedf1437a30ef9671f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:13Z\\\",\\\"message\\\":\\\":13.693509 6241 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.693980 6241 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694126 6241 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 04:47:13.694637 6241 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:13.694709 6241 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:13.694739 6241 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 04:47:13.694752 6241 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 04:47:13.694791 6241 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:13.694898 6241 factory.go:656] Stopping watch factory\\\\nI1122 04:47:13.694928 6241 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:13.694992 6241 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:13.695010 6241 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:13.695023 6241 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 04:47:13.695038 6241 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"message\\\":\\\"Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:15.254435 6424 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:15.254487 6424 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:15.254493 6424 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:15.254514 6424 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:15.254563 6424 factory.go:656] Stopping watch factory\\\\nI1122 04:47:15.254588 6424 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:15.254596 6424 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:15.254602 6424 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:15.254608 6424 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:15.254614 6424 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:15.254620 6424 handler.go:208] Removed 
*v1.Pod event handler 6\\\\nI1122 04:47:15.254690 6424 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 04:47:15.254791 6424 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 04:47:15.254822 6424 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:15.254842 6424 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 04:47:15.254928 6424 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.934381 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.953136 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.953182 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.953204 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.953223 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.953237 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:19Z","lastTransitionTime":"2025-11-22T04:47:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.955253 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.977930 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:19 crc kubenswrapper[4948]: I1122 04:47:19.992198 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:19Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.007207 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:20Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.022452 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:20Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.031421 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:20Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.056929 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.056976 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.056988 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.057007 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.057019 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.160362 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.160422 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.160438 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.160460 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.160511 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.263324 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.263378 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.263395 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.263420 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.263438 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.367070 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.367140 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.367157 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.367180 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.367196 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.471627 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.471730 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.471752 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.471821 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.471846 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.576071 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.576112 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.576128 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.576151 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.576168 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.686451 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.686591 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.686627 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.686660 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.686685 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.790039 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.790140 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.790159 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.790185 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.790203 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.893650 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.893713 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.893733 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.893757 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.893775 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.997674 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.997759 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.997788 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.997826 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:20 crc kubenswrapper[4948]: I1122 04:47:20.997849 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:20Z","lastTransitionTime":"2025-11-22T04:47:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.101984 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.102108 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.102134 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.102167 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.102189 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:21Z","lastTransitionTime":"2025-11-22T04:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.204256 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.204303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.204319 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.204336 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.204348 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:21Z","lastTransitionTime":"2025-11-22T04:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.308163 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.308225 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.308242 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.308269 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.308287 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:21Z","lastTransitionTime":"2025-11-22T04:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.411696 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.411766 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.411784 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.411809 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.411829 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:21Z","lastTransitionTime":"2025-11-22T04:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.523879 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.523921 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.523932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.523954 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.523965 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:21Z","lastTransitionTime":"2025-11-22T04:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.627088 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.627161 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.627173 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.627195 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.627207 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:21Z","lastTransitionTime":"2025-11-22T04:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.731076 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.731125 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.731136 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.731153 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.731167 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:21Z","lastTransitionTime":"2025-11-22T04:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.757803 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.757823 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.757900 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.757944 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:21 crc kubenswrapper[4948]: E1122 04:47:21.758130 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:21 crc kubenswrapper[4948]: E1122 04:47:21.758264 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:21 crc kubenswrapper[4948]: E1122 04:47:21.758408 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:21 crc kubenswrapper[4948]: E1122 04:47:21.758638 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.833937 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.833988 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.834004 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.834025 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.834036 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:21Z","lastTransitionTime":"2025-11-22T04:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.936660 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.936709 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.936725 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.936748 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.936765 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:21Z","lastTransitionTime":"2025-11-22T04:47:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:21 crc kubenswrapper[4948]: I1122 04:47:21.937830 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:21 crc kubenswrapper[4948]: E1122 04:47:21.938015 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:21 crc kubenswrapper[4948]: E1122 04:47:21.938094 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs podName:9a35ebfd-12d4-4129-9c61-9d5880130fa0 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:29.93807226 +0000 UTC m=+52.624082806 (durationBeforeRetry 8s). 
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.040619 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.040672 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.040690 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.040714 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.040734 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.143947 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.144090 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.144114 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.144189 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.144232 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.247432 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.247492 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.247501 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.247519 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.247529 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.350537 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.350586 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.350605 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.350628 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.350645 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.453675 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.453742 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.453761 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.453786 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.453804 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.557053 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.557110 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.557127 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.557158 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.557175 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.660180 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.660234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.660247 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.660266 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.660279 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.763260 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.763308 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.763323 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.763339 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.763353 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.866978 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.867044 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.867065 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.867092 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.867117 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.970549 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.970636 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.970663 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.970693 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:22 crc kubenswrapper[4948]: I1122 04:47:22.970717 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:22Z","lastTransitionTime":"2025-11-22T04:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.073124 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.073183 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.073198 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.073220 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.073236 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:23Z","lastTransitionTime":"2025-11-22T04:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.175773 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.175815 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.175828 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.175845 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.175856 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:23Z","lastTransitionTime":"2025-11-22T04:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.279911 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.279972 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.279982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.279999 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.280011 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:23Z","lastTransitionTime":"2025-11-22T04:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.383138 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.383205 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.383231 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.383266 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.383290 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:23Z","lastTransitionTime":"2025-11-22T04:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.419440 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.420874 4948 scope.go:117] "RemoveContainer" containerID="fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345" Nov 22 04:47:23 crc kubenswrapper[4948]: E1122 04:47:23.421314 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.439212 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.462752 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.483673 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.486365 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.486413 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.486431 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.486453 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.486500 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:23Z","lastTransitionTime":"2025-11-22T04:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.500434 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.527135 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\
"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started
\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"message\\\":\\\"Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:15.254435 6424 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:15.254487 6424 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:15.254493 6424 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:15.254514 6424 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:15.254563 6424 factory.go:656] Stopping watch factory\\\\nI1122 04:47:15.254588 6424 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:15.254596 6424 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:15.254602 6424 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:15.254608 6424 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:15.254614 6424 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:15.254620 6424 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:15.254690 6424 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 04:47:15.254791 6424 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 04:47:15.254822 6424 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:15.254842 6424 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 04:47:15.254928 6424 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.551400 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.567850 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee122
0d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.581531 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.588760 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.588838 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.588862 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.588894 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.588917 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:23Z","lastTransitionTime":"2025-11-22T04:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.599669 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.622201 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.643529 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.661048 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.673212 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.690919 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.691691 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.691728 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.691741 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.691759 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.691771 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:23Z","lastTransitionTime":"2025-11-22T04:47:23Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.706302 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04
:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.719200 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:23Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.757655 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.757658 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.757825 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:23 crc kubenswrapper[4948]: E1122 04:47:23.757995 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:23 crc kubenswrapper[4948]: E1122 04:47:23.758068 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.758082 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:23 crc kubenswrapper[4948]: E1122 04:47:23.758154 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.758105 4948 scope.go:117] "RemoveContainer" containerID="4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea" Nov 22 04:47:23 crc kubenswrapper[4948]: E1122 04:47:23.758255 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.795825 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.795874 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.795891 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.795915 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.795932 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:23Z","lastTransitionTime":"2025-11-22T04:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.898331 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.898359 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.898367 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.898380 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:23 crc kubenswrapper[4948]: I1122 04:47:23.898389 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:23Z","lastTransitionTime":"2025-11-22T04:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.001449 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.001568 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.001613 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.001645 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.001672 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.105712 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.105754 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.105765 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.105780 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.105791 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.209026 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.209086 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.209103 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.209128 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.209146 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.218656 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.220988 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.221559 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.237246 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.253714 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.277379 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.293426 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.311691 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.311731 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.311742 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.311757 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.311768 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.317193 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.337590 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 
04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.354971 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.367581 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.392650 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"message\\\":\\\"Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:15.254435 6424 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:15.254487 6424 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:15.254493 6424 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:15.254514 6424 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:15.254563 6424 factory.go:656] Stopping watch factory\\\\nI1122 04:47:15.254588 6424 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:15.254596 6424 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:15.254602 6424 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:15.254608 6424 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:15.254614 6424 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:15.254620 6424 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:15.254690 6424 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 04:47:15.254791 6424 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 04:47:15.254822 6424 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:15.254842 6424 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 04:47:15.254928 6424 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.409439 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.414944 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.414985 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.414995 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.415009 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.415023 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.430522 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.450767 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.463644 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.476744 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.489904 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.503334 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-c
ni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:24Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.517898 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.517955 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.517973 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.517996 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.518013 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.621620 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.621675 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.621889 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.621912 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.621931 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.725075 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.725144 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.725161 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.725185 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.725203 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.830195 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.830228 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.830238 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.830253 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.830261 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.932569 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.932619 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.932635 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.932659 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:24 crc kubenswrapper[4948]: I1122 04:47:24.932677 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:24Z","lastTransitionTime":"2025-11-22T04:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.036478 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.036521 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.036531 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.036545 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.036555 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.139505 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.139543 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.139552 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.139569 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.139580 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.242534 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.242599 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.242617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.242639 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.242654 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.345923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.345969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.345983 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.346002 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.346014 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.449352 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.449402 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.449414 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.449432 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.449511 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.551977 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.552025 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.552041 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.552061 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.552076 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.654767 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.654808 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.654821 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.654840 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.654853 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.757287 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:25 crc kubenswrapper[4948]: E1122 04:47:25.757424 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.757505 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.757533 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:25 crc kubenswrapper[4948]: E1122 04:47:25.757623 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.757672 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.757693 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.757702 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.757718 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: E1122 04:47:25.757717 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.757730 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.757933 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:25 crc kubenswrapper[4948]: E1122 04:47:25.758013 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.861314 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.861678 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.861687 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.861701 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.861712 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.963995 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.964031 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.964039 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.964051 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:25 crc kubenswrapper[4948]: I1122 04:47:25.964060 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:25Z","lastTransitionTime":"2025-11-22T04:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.066671 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.066761 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.066784 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.066818 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.066842 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.169400 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.169457 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.169516 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.169559 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.169582 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.272299 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.272409 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.272434 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.272495 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.272515 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.375928 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.375994 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.376011 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.376041 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.376060 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.478937 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.479004 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.479014 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.479032 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.479042 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.581918 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.581980 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.582002 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.582030 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.582054 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.685713 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.685776 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.685799 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.685826 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.685847 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.789427 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.789525 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.789550 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.789578 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.789599 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.863758 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.863822 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.863840 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.863864 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.863883 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.878851 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 04:47:26 crc kubenswrapper[4948]: E1122 04:47:26.887980 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 
2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.893108 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.895526 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.895572 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.895590 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.895614 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.895630 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.909559 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb178963
14557672369ea746e3636345\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"message\\\":\\\"Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:15.254435 6424 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:15.254487 6424 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:15.254493 6424 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:15.254514 6424 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:15.254563 6424 factory.go:656] Stopping watch factory\\\\nI1122 04:47:15.254588 6424 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:15.254596 6424 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:15.254602 6424 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:15.254608 6424 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:15.254614 6424 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:15.254620 6424 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:15.254690 6424 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 04:47:15.254791 6424 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 04:47:15.254822 6424 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:15.254842 6424 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 04:47:15.254928 6424 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: E1122 04:47:26.918621 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 
2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.924044 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.924104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.924125 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.924154 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.924179 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.933642 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db77
08c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\"
:\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\
\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: E1122 04:47:26.946342 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 
2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.950365 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.950415 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.950428 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.950445 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.950459 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.954687 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: E1122 04:47:26.964799 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.968218 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.968259 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.968273 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.968293 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.968310 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.969633 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: E1122 04:47:26.981851 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"5
2e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: E1122 04:47:26.982582 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.985101 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ov
nkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.987053 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.987076 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.987085 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.987097 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.987105 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:26Z","lastTransitionTime":"2025-11-22T04:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:26 crc kubenswrapper[4948]: I1122 04:47:26.995660 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:26Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.005863 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.016050 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.027764 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.038510 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.048987 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.058510 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.066729 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.079147 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.088080 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 
04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.089124 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.089155 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.089167 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.089184 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.089196 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:27Z","lastTransitionTime":"2025-11-22T04:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.098942 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:27Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.191999 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.192076 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.192097 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.192125 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.192147 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:27Z","lastTransitionTime":"2025-11-22T04:47:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
[log condensed: the five node-status entries first recorded at 04:47:27.089 — "Recording event message for node" for NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, and NodeNotReady, plus the setters.go:603 "Node became not ready" Ready=False condition citing the missing CNI configuration — repeat unchanged except for timestamps at 04:47:27.295, .399, .502, .605, and .708]
Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.757750 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:47:27 crc kubenswrapper[4948]: E1122 04:47:27.757940 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.758301 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx"
Nov 22 04:47:27 crc kubenswrapper[4948]: E1122 04:47:27.758515 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0"
Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.758852 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:47:27 crc kubenswrapper[4948]: I1122 04:47:27.759113 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:47:27 crc kubenswrapper[4948]: E1122 04:47:27.759382 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:47:27 crc kubenswrapper[4948]: E1122 04:47:27.759583 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
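[editor's note: two failure modes dominate this window. First, kubelet keeps the node Ready=False because no CNI network configuration exists yet in /etc/kubernetes/cni/net.d/, so no pod sandboxes can be created. The Go sketch below is a minimal standalone diagnostic (not kubelet's own code) that mirrors that readiness check; treating .conf/.conflist/.json as the discoverable config extensions follows libcni's usual behavior but is an assumption of this sketch.]

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the repeated kubelet message; the node stays
	// NotReady while no network config can be loaded from it.
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", dir, err)
		os.Exit(1)
	}
	found := 0
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		// Assumption: accept the extensions libcni normally discovers.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config present:", filepath.Join(dir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration file found; expect NetworkReady=false")
	}
}

[editor's note, continued: second, every pod status patch is rejected because the pod.network-node-identity.openshift.io webhook's serving certificate expired on 2025-08-24T17:21:41Z, long before the logged clock time of 2025-11-22. The sketch below reproduces the x509 validity-window test that the TLS handshake applies; the certificate path is hypothetical, not taken from this cluster.]

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path to the webhook serving certificate (PEM).
	pemBytes, err := os.ReadFile("/path/to/webhook-serving.crt")
	if err != nil {
		fmt.Println("read error:", err)
		os.Exit(1)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Println("no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println("parse error:", err)
		os.Exit(1)
	}
	now := time.Now()
	switch {
	case now.After(cert.NotAfter):
		// Same shape as the log line: "current time X is after Y".
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: certificate starts at %s\n",
			cert.NotBefore.Format(time.RFC3339))
	default:
		fmt.Printf("valid until %s\n", cert.NotAfter.Format(time.RFC3339))
	}
}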
[log condensed: the same five node-status entries repeat roughly every 100 ms, unchanged except for timestamps, at 04:47:27.812, .916, 04:47:28.020, .122, .226, .329, .433, .537, .808, .911, 04:47:29.014, .118, .222, .325, .428, .531, .634, and .737, each carrying the identical KubeletNotReady / missing-CNI-configuration message]
Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.757313 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx"
Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.757393 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.757544 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:47:29 crc kubenswrapper[4948]: E1122 04:47:29.757543 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0"
Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.757588 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:47:29 crc kubenswrapper[4948]: E1122 04:47:29.757720 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:47:29 crc kubenswrapper[4948]: E1122 04:47:29.757854 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 04:47:29 crc kubenswrapper[4948]: E1122 04:47:29.758026 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.773529 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.794534 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.812808 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.834778 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-c
ni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.840146 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.840193 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.840209 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.840228 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.840242 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:29Z","lastTransitionTime":"2025-11-22T04:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.850368 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.868591 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.881247 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.895328 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.908366 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.924531 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.939783 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.943614 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.943860 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.943891 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.943921 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.943944 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:29Z","lastTransitionTime":"2025-11-22T04:47:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.962967 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:29 crc kubenswrapper[4948]: I1122 04:47:29.999671 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:29Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.015324 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:30Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.018843 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:30 crc kubenswrapper[4948]: E1122 04:47:30.018979 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:30 crc kubenswrapper[4948]: E1122 04:47:30.019083 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs podName:9a35ebfd-12d4-4129-9c61-9d5880130fa0 nodeName:}" failed. No retries permitted until 2025-11-22 04:47:46.01905608 +0000 UTC m=+68.705066636 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs") pod "network-metrics-daemon-btkdx" (UID: "9a35ebfd-12d4-4129-9c61-9d5880130fa0") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.041999 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnl
y\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"message\\\":\\\"Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:15.254435 6424 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:15.254487 6424 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:15.254493 6424 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:15.254514 6424 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:15.254563 6424 factory.go:656] Stopping watch factory\\\\nI1122 04:47:15.254588 6424 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:15.254596 6424 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:15.254602 6424 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:15.254608 6424 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:15.254614 6424 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:15.254620 6424 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:15.254690 6424 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 04:47:15.254791 6424 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 04:47:15.254822 6424 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:15.254842 6424 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 04:47:15.254928 6424 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"r
ecursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:30Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.048137 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.048210 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.048228 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.048264 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.048296 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.069161 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:30Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.094835 4948 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:30Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.155684 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.155721 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.155734 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.155753 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.155767 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.259213 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.259270 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.259288 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.259310 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.259327 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.362333 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.362424 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.362442 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.362489 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.362509 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.466193 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.466263 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.466285 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.466319 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.466381 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.568916 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.568971 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.568989 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.569014 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.569034 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.672196 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.672279 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.672301 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.672335 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.672358 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.775360 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.775413 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.775429 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.775451 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.775493 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.878854 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.878926 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.878948 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.878975 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.878994 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.982363 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.982428 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.982448 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.982497 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:30 crc kubenswrapper[4948]: I1122 04:47:30.982514 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:30Z","lastTransitionTime":"2025-11-22T04:47:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.085298 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.085362 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.085383 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.085413 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.085436 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:31Z","lastTransitionTime":"2025-11-22T04:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.188195 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.188230 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.188241 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.188256 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.188266 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:31Z","lastTransitionTime":"2025-11-22T04:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.290687 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.291106 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.291318 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.291555 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.291738 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:31Z","lastTransitionTime":"2025-11-22T04:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.394427 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.394512 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.394525 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.394540 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.394550 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:31Z","lastTransitionTime":"2025-11-22T04:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.496242 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.496273 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.496283 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.496299 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.496311 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:31Z","lastTransitionTime":"2025-11-22T04:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.535145 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.535403 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:48:03.535366751 +0000 UTC m=+86.221377317 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.535523 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.535677 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.535764 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:48:03.535740782 +0000 UTC m=+86.221751328 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.599261 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.599315 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.599331 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.599353 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.599370 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:31Z","lastTransitionTime":"2025-11-22T04:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.636001 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.636065 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.636121 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636292 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636327 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636360 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636330 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636396 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636421 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:48:03.636392478 +0000 UTC m=+86.322403034 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636460 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636544 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636515 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 04:48:03.63645572 +0000 UTC m=+86.322466276 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.636638 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 04:48:03.636613005 +0000 UTC m=+86.322623561 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.703178 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.703228 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.703239 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.703254 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.703265 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:31Z","lastTransitionTime":"2025-11-22T04:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.757332 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.757592 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.757817 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.757882 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.757975 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.758123 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.758201 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:31 crc kubenswrapper[4948]: E1122 04:47:31.758343 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.806211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.806259 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.806276 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.806304 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.806322 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:31Z","lastTransitionTime":"2025-11-22T04:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.908886 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.908921 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.908932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.908950 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:31 crc kubenswrapper[4948]: I1122 04:47:31.908963 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:31Z","lastTransitionTime":"2025-11-22T04:47:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.012816 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.012891 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.012909 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.012935 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.012954 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.116039 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.116087 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.116104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.116128 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.116146 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.219013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.219081 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.219100 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.219126 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.219146 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.329843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.329901 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.329916 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.329937 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.329953 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.434064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.434167 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.434184 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.434219 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.434243 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.537143 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.537216 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.537243 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.537279 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.537302 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.641385 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.641496 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.641508 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.641532 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.641645 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.744940 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.744976 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.744987 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.745004 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.745016 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.847993 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.848056 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.848073 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.848099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.848118 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.950454 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.950552 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.950579 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.950608 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:32 crc kubenswrapper[4948]: I1122 04:47:32.950630 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:32Z","lastTransitionTime":"2025-11-22T04:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.053533 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.053567 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.053579 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.053595 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.053607 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:33Z","lastTransitionTime":"2025-11-22T04:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.156703 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.156772 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.156792 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.156817 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.156835 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:33Z","lastTransitionTime":"2025-11-22T04:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.259384 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.259502 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.259529 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.259561 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.259586 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:33Z","lastTransitionTime":"2025-11-22T04:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.366566 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.366624 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.366647 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.366678 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.366698 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:33Z","lastTransitionTime":"2025-11-22T04:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.469175 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.469250 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.469271 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.469315 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.469353 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:33Z","lastTransitionTime":"2025-11-22T04:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.571797 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.571854 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.571871 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.571897 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.571914 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:33Z","lastTransitionTime":"2025-11-22T04:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.675211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.675270 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.675289 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.675314 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.675331 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:33Z","lastTransitionTime":"2025-11-22T04:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.757758 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.757809 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.757936 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:33 crc kubenswrapper[4948]: I1122 04:47:33.757982 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:33 crc kubenswrapper[4948]: E1122 04:47:33.758828 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 04:47:33 crc kubenswrapper[4948]: E1122 04:47:33.758969 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 04:47:33 crc kubenswrapper[4948]: E1122 04:47:33.759030 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:47:33 crc kubenswrapper[4948]: E1122 04:47:33.759190 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0"
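Every one of these sync failures traces back to the same condition: nothing under /etc/kubernetes/cni/net.d/. A Go sketch of the directory probe that libcni-style config loaders perform; the path is the one named in the messages above, and the accepted extensions are an assumption taken from the common libcni convention:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the kubelet errors above.
	dir := "/etc/kubernetes/cni/net.d"

	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	var confs []string
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni-style loaders typically accept
			confs = append(confs, filepath.Join(dir, e.Name()))
		}
	}
	if len(confs) == 0 {
		// This is the state the log reports: NetworkReady=false until the
		// network provider (here OVN-Kubernetes) writes its config file.
		fmt.Println("no CNI configuration file found; network plugin not ready")
		return
	}
	fmt.Println("CNI configs:", confs)
}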
Nov 22 04:47:34 crc kubenswrapper[4948]: I1122 04:47:34.090058 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:34 crc kubenswrapper[4948]: I1122 04:47:34.090134 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:34 crc kubenswrapper[4948]: I1122 04:47:34.090153 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:34 crc kubenswrapper[4948]: I1122 04:47:34.090179 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:34 crc kubenswrapper[4948]: I1122 04:47:34.090198 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:34Z","lastTransitionTime":"2025-11-22T04:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
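The five-record block at 04:47:33.259 recurs roughly every 100 ms through 04:47:36 as the kubelet recomputes node status. When scanning a log like this, a frequency summary keeps the timeline readable; a small Go sketch, assuming one klog entry per line on stdin and using timestamp-blanking as the dedup key:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Timestamps are the only thing that differs between the repeated status
// records above, so blank them out to form a dedup key.
var ts = regexp.MustCompile(`\d{2}:\d{2}:\d{2}(\.\d+)?`)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // status entries can be very long
	counts := map[string]int{}
	first := map[string]string{} // first occurrence, kept verbatim
	var order []string           // first-seen order of distinct records
	for sc.Scan() {
		line := sc.Text()
		key := ts.ReplaceAllString(line, "")
		if counts[key] == 0 {
			order = append(order, key)
			first[key] = line
		}
		counts[key]++
	}
	for _, key := range order {
		fmt.Printf("%6dx %s\n", counts[key], first[key])
	}
}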
Nov 22 04:47:34 crc kubenswrapper[4948]: I1122 04:47:34.759043 4948 scope.go:117] "RemoveContainer" containerID="fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345"
Nov 22 04:47:35 crc kubenswrapper[4948]: I1122 04:47:35.035537 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:35 crc kubenswrapper[4948]: I1122 04:47:35.035581 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:35 crc kubenswrapper[4948]: I1122 04:47:35.035597 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:35 crc kubenswrapper[4948]: I1122 04:47:35.035617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:35 crc kubenswrapper[4948]: I1122 04:47:35.035629 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:35Z","lastTransitionTime":"2025-11-22T04:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.067427 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.067542 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.067562 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.067587 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.067606 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.171034 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.171107 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.171129 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.171163 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.171187 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.272904 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/1.log" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.273885 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.273935 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.273953 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.273979 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.274002 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.277690 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29"} Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.278203 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.297521 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.320546 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.341338 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.376580 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.376619 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.376631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.376648 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.376659 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.377734 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.391641 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.406176 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.427200 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.447710 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.458904 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.479641 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.479671 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.479679 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.479691 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.479701 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.481521 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"message\\\":\\\"Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:15.254435 6424 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:15.254487 6424 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:15.254493 6424 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:15.254514 6424 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:15.254563 6424 factory.go:656] Stopping watch factory\\\\nI1122 04:47:15.254588 6424 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:15.254596 6424 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:15.254602 6424 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:15.254608 6424 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:15.254614 6424 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:15.254620 6424 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:15.254690 6424 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 04:47:15.254791 6424 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 04:47:15.254822 6424 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:15.254842 6424 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 04:47:15.254928 6424 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.498960 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.515651 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.534844 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.549970 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.564406 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.582120 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.582160 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.582176 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.582198 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.582215 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.586534 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.599762 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8d
d8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:36Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.684939 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.684994 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.685010 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.685033 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.685052 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.788437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.788560 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.788584 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.788611 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.788629 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.891416 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.891510 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.891530 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.891556 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.891573 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.995169 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.995311 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.995340 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.995600 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:36 crc kubenswrapper[4948]: I1122 04:47:36.995626 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:36Z","lastTransitionTime":"2025-11-22T04:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.098681 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.098806 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.098824 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.098847 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.098863 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.202860 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.202925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.202942 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.202967 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.202989 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.285076 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/2.log"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.286149 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/1.log"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.289786 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29" exitCode=1
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.289855 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29"}
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.289916 4948 scope.go:117] "RemoveContainer" containerID="fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.291197 4948 scope.go:117] "RemoveContainer" containerID="658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29"
Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.291440 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.305780 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.305847 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.305869 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.305901 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.305925 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.310113 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.330516 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.351450 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.367363 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.368892 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:37 
crc kubenswrapper[4948]: I1122 04:47:37.368960 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.368983 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.369013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.369039 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.388452 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.394428 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.399731 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.399816 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.399840 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.399870 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.399892 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.407565 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.420445 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.425391 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.425443 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.425480 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.425502 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.425515 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.425709 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04
:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.451033 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.456036 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.456072 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.456083 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.456098 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.456110 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.463378 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae
173d7a17bcbc53c7df928a29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc432332cfe41c2c061fd4f30b40a750bb17896314557672369ea746e3636345\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"message\\\":\\\"Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:15.254435 6424 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:15.254487 6424 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:15.254493 6424 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:15.254514 6424 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:15.254563 6424 factory.go:656] Stopping watch factory\\\\nI1122 04:47:15.254588 6424 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 04:47:15.254596 6424 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:15.254602 6424 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:15.254608 6424 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:15.254614 6424 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:15.254620 6424 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:15.254690 6424 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 04:47:15.254791 6424 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 04:47:15.254822 6424 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:15.254842 6424 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 04:47:15.254928 6424 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:36Z\\\",\\\"message\\\":\\\"ller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 04:47:36.431052 6693 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 04:47:36.431067 6693 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:36.431105 6693 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:36.431123 6693 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:36.431153 6693 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:36.431158 6693 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:36.431193 6693 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:36.431224 6693 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:36.431225 6693 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:36.431220 6693 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:36.431242 6693 factory.go:656] Stopping watch factory\\\\nI1122 04:47:36.431243 6693 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:36.431256 6693 handler.go:208] Removed 
*v1.EgressIP event handler 8\\\\nI1122 04:47:36.431261 6693 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:36.431266 6693 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.474294 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.479120 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.479178 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.479197 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.479227 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.479246 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.481414 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f8871
8316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.495116 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.495271 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.497456 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.497548 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.497562 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.497581 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.497593 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.497738 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.521578 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.537762 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.556372 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.577567 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.594692 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.600598 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.600658 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.600674 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.600697 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.600713 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.613903 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.640453 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:37Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.709960 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.709997 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.710009 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.710027 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.710040 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.758851 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.758935 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.759027 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.759328 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.759434 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.759572 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.759094 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:37 crc kubenswrapper[4948]: E1122 04:47:37.760158 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.813967 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.814176 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.814208 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.814238 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.814545 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.917288 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.917341 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.917359 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.917381 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:37 crc kubenswrapper[4948]: I1122 04:47:37.917398 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:37Z","lastTransitionTime":"2025-11-22T04:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.021160 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.021269 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.021291 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.021314 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.021330 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.125002 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.125073 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.125099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.125129 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.125151 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.229050 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.229124 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.229146 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.229175 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.229196 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.297530 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/2.log" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.302735 4948 scope.go:117] "RemoveContainer" containerID="658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29" Nov 22 04:47:38 crc kubenswrapper[4948]: E1122 04:47:38.302986 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.338412 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.338506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.338526 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.338551 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.338568 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.359111 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.378422 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 
04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.396950 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.418768 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.434823 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.441172 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.441242 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.441252 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.441267 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.441279 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.449453 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.460748 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.525615 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:36Z\\\",\\\"message\\\":\\\"ller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 04:47:36.431052 6693 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 04:47:36.431067 6693 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:36.431105 6693 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:36.431123 6693 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:36.431153 6693 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:36.431158 6693 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:36.431193 6693 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:36.431224 6693 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:36.431225 6693 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:36.431220 6693 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:36.431242 6693 factory.go:656] Stopping watch factory\\\\nI1122 04:47:36.431243 6693 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:36.431256 6693 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:36.431261 6693 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:36.431266 6693 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.544145 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.544207 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.544225 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.544250 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.544267 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.550337 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.570618 4948 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.589906 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.611085 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.627810 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.646778 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.646844 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.646861 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.646885 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.646902 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.647299 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.661435 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.675995 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.688648 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:38Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.749881 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.749930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.749941 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.749958 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.749971 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.852896 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.852982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.852998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.853026 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.853044 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.956496 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.956569 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.956588 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.956614 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:38 crc kubenswrapper[4948]: I1122 04:47:38.956630 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:38Z","lastTransitionTime":"2025-11-22T04:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.060384 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.060448 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.060499 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.060527 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.060546 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.102528 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.123351 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.144930 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.163509 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.163581 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.163596 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.163623 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.163641 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.166852 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.186664 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.208572 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.228344 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.246816 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.263626 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.267201 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.267258 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.267278 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.267308 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.267329 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.280913 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.301176 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:
12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.317452 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.335615 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.365087 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.369985 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.370018 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.370030 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.370049 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.370062 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.381387 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.393920 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.426700 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:36Z\\\",\\\"message\\\":\\\"ller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 04:47:36.431052 6693 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 04:47:36.431067 6693 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:36.431105 6693 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:36.431123 6693 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:36.431153 6693 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:36.431158 6693 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:36.431193 6693 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:36.431224 6693 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:36.431225 6693 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:36.431220 6693 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:36.431242 6693 factory.go:656] Stopping watch factory\\\\nI1122 04:47:36.431243 6693 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:36.431256 6693 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:36.431261 6693 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:36.431266 6693 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.446161 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.473865 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.473937 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.473961 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.473992 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.474014 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.577099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.577168 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.577188 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.577214 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.577234 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.680719 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.680802 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.680822 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.680854 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.680879 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.758065 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:39 crc kubenswrapper[4948]: E1122 04:47:39.758212 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.758081 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.758310 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.758370 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:39 crc kubenswrapper[4948]: E1122 04:47:39.758662 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:39 crc kubenswrapper[4948]: E1122 04:47:39.758869 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:39 crc kubenswrapper[4948]: E1122 04:47:39.758966 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.780328 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.784271 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.784334 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.784348 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.784372 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.784392 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.799578 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.817416 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.838434 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.854281 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.868653 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.887959 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.887911 4948 status_manager.go:875] "Failed to update status for
pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3
b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.888021 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.888037 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.888065 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.888081 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.907303 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.925702 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.940639 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.971344 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:36Z\\\",\\\"message\\\":\\\"ller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 04:47:36.431052 6693 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 04:47:36.431067 6693 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:36.431105 6693 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:36.431123 6693 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:36.431153 6693 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:36.431158 6693 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:36.431193 6693 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:36.431224 6693 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:36.431225 6693 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:36.431220 6693 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:36.431242 6693 factory.go:656] Stopping watch factory\\\\nI1122 04:47:36.431243 6693 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:36.431256 6693 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:36.431261 6693 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:36.431266 6693 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.992040 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.992089 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.992101 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.992127 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.992140 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:39Z","lastTransitionTime":"2025-11-22T04:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:39 crc kubenswrapper[4948]: I1122 04:47:39.993212 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:39Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.008305 4948 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:40Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.029860 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:40Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.053588 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:40Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.070954 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:40Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.089020 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:40Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.096210 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.096304 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.096325 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.096358 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.096379 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:40Z","lastTransitionTime":"2025-11-22T04:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.199856 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.199896 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.199905 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.199925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.199937 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:40Z","lastTransitionTime":"2025-11-22T04:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.303091 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.303157 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.303172 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.303197 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.303214 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:40Z","lastTransitionTime":"2025-11-22T04:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.407152 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.407211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.407229 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.407259 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.407279 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:40Z","lastTransitionTime":"2025-11-22T04:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.511598 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.511670 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.511682 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.511704 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.511718 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:40Z","lastTransitionTime":"2025-11-22T04:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.615327 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.615403 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.615427 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.615455 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.615516 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:40Z","lastTransitionTime":"2025-11-22T04:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.719140 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.719176 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.719189 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.719205 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.719216 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:40Z","lastTransitionTime":"2025-11-22T04:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.822951 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.823579 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.823726 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.823875 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.824100 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:40Z","lastTransitionTime":"2025-11-22T04:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.927386 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.927452 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.927518 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.927549 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:40 crc kubenswrapper[4948]: I1122 04:47:40.927571 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:40Z","lastTransitionTime":"2025-11-22T04:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.029845 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.029897 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.029914 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.029938 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.029956 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.133510 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.133576 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.133590 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.133620 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.133637 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.236567 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.236647 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.236665 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.236694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.236719 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.339885 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.340285 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.340426 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.340567 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.340672 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.443927 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.443988 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.444007 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.444037 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.444059 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.547317 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.547418 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.547436 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.547506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.547523 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.650095 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.650155 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.650171 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.650195 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.650210 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.753158 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.753536 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.753694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.753798 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.753896 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.757710 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.757739 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.757760 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:41 crc kubenswrapper[4948]: E1122 04:47:41.758268 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.757780 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:41 crc kubenswrapper[4948]: E1122 04:47:41.758012 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:41 crc kubenswrapper[4948]: E1122 04:47:41.758324 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:41 crc kubenswrapper[4948]: E1122 04:47:41.758535 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.856969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.857358 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.857446 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.857563 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.857763 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.961373 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.961411 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.961421 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.961440 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:41 crc kubenswrapper[4948]: I1122 04:47:41.961452 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:41Z","lastTransitionTime":"2025-11-22T04:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.064636 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.065084 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.065181 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.065310 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.065719 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:42Z","lastTransitionTime":"2025-11-22T04:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.170033 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.170101 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.170125 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.170157 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.170181 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:42Z","lastTransitionTime":"2025-11-22T04:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.274017 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.274064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.274077 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.274094 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.274109 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:42Z","lastTransitionTime":"2025-11-22T04:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.375935 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.375975 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.375987 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.376005 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:42 crc kubenswrapper[4948]: I1122 04:47:42.376016 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:42Z","lastTransitionTime":"2025-11-22T04:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
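The five records above are one pass of the status cycle the kubelet re-records (about every 100 ms in this log) for as long as /etc/kubernetes/cni/net.d/ holds no CNI configuration; only the timestamps change between passes. Below is a minimal Python sketch for folding such runs when reading a kubelet log; the regex, and the assumption that a cycle is exactly the Memory/Disk/PID/NotReady sequence plus its setters.go line, are read off the records above rather than taken from any kubelet contract.

import re
import sys

# klog prefix plus the recorded event name, e.g.
#   I1122 04:47:42.376005 4948 kubelet_node_status.go:724] "Recording event
#   message for node" node="crc" event="NodeNotReady"
REC = re.compile(r'I\d{4} (\d{2}:\d{2}:\d{2}\.\d+).*?event="([A-Za-z]+)"')

def collapse_status_cycles(lines):
    """Fold runs of the repeating Memory/Disk/PID/NotReady cycle into one
    summary line; every other record passes through unchanged."""
    run = []  # timestamps of the NodeNotReady events in the current run
    for line in lines:
        m = REC.search(line)
        if m:
            if m.group(2) == "NodeNotReady":
                run.append(m.group(1))
            continue  # the four Recording-event lines fold into the summary
        if '"Node became not ready"' in line:
            continue  # the setters.go line that closes each cycle
        if run:
            yield f"[{len(run)} NodeNotReady status cycles, {run[0]} .. {run[-1]}]"
            run = []
        yield line
    if run:
        yield f"[{len(run)} NodeNotReady status cycles, {run[0]} .. {run[-1]}]"

if __name__ == "__main__":
    for out in collapse_status_cycles(sys.stdin):
        print(out.rstrip("\n"))

Fed a log like this one on stdin, it prints a single bracketed summary per run in place of hundreds of near-identical records.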
Nov 22 04:47:43 crc kubenswrapper[4948]: I1122 04:47:43.757781 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:47:43 crc kubenswrapper[4948]: I1122 04:47:43.757848 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx"
Nov 22 04:47:43 crc kubenswrapper[4948]: I1122 04:47:43.757818 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:47:43 crc kubenswrapper[4948]: I1122 04:47:43.757781 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:47:43 crc kubenswrapper[4948]: E1122 04:47:43.757966 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 04:47:43 crc kubenswrapper[4948]: E1122 04:47:43.758263 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0"
Nov 22 04:47:43 crc kubenswrapper[4948]: E1122 04:47:43.758169 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 04:47:43 crc kubenswrapper[4948]: E1122 04:47:43.758410 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:47:44 crc kubenswrapper[4948]: I1122 04:47:44.649250 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:44 crc kubenswrapper[4948]: I1122 04:47:44.649295 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:44 crc kubenswrapper[4948]: I1122 04:47:44.649308 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:44 crc kubenswrapper[4948]: I1122 04:47:44.649323 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:44 crc kubenswrapper[4948]: I1122 04:47:44.649332 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:44Z","lastTransitionTime":"2025-11-22T04:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:45 crc kubenswrapper[4948]: I1122 04:47:45.758005 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:47:45 crc kubenswrapper[4948]: I1122 04:47:45.758072 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx"
Nov 22 04:47:45 crc kubenswrapper[4948]: E1122 04:47:45.758138 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:47:45 crc kubenswrapper[4948]: I1122 04:47:45.758154 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:47:45 crc kubenswrapper[4948]: I1122 04:47:45.758147 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:47:45 crc kubenswrapper[4948]: E1122 04:47:45.758217 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0"
Nov 22 04:47:45 crc kubenswrapper[4948]: E1122 04:47:45.758282 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 04:47:45 crc kubenswrapper[4948]: E1122 04:47:45.758355 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 04:47:46 crc kubenswrapper[4948]: I1122 04:47:46.087147 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx"
Nov 22 04:47:46 crc kubenswrapper[4948]: E1122 04:47:46.087394 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 04:47:46 crc kubenswrapper[4948]: E1122 04:47:46.087512 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs podName:9a35ebfd-12d4-4129-9c61-9d5880130fa0 nodeName:}" failed. No retries permitted until 2025-11-22 04:48:18.087497024 +0000 UTC m=+100.773507530 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs") pod "network-metrics-daemon-btkdx" (UID: "9a35ebfd-12d4-4129-9c61-9d5880130fa0") : object "openshift-multus"/"metrics-daemon-secret" not registered
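The nestedpendingoperations record above is the kubelet's volume manager applying its exponential backoff: MountVolume.SetUp for metrics-certs failed because the kubelet's secret manager has no metrics-daemon-secret object registered yet, so the next attempt is pushed out 32 s, to 04:48:18. A sketch that pulls the retry deadline and backoff out of such a record, with the field layout assumed from the entry above:

import re
from datetime import datetime

# "No retries permitted until 2025-11-22 04:48:18.087497024 +0000 UTC
#  m=+100.773507530 (durationBeforeRetry 32s)" -- layout taken from the
# record above.
RETRY = re.compile(r'No retries permitted until '
                   r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d+ \+0000 UTC '
                   r'm=\+([\d.]+) \(durationBeforeRetry ([^)]+)\)')

def retry_info(record):
    """Return (wall-clock deadline, monotonic offset, backoff) or None."""
    m = RETRY.search(record)
    if m is None:
        return None
    deadline = datetime.strptime(m.group(1), "%Y-%m-%d %H:%M:%S")
    return deadline, float(m.group(2)), m.group(3)

if __name__ == "__main__":
    import sys
    for line in sys.stdin:
        info = retry_info(line)
        if info:
            print(f"retry at {info[0]} (m=+{info[1]}s, backoff {info[2]})")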
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.527597 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.527823 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.527889 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.527950 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.528070 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:47Z","lastTransitionTime":"2025-11-22T04:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 22 04:47:47 crc kubenswrapper[4948]: E1122 04:47:47.576017 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:47Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.580309 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.580333 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.580342 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.580356 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.580367 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:47Z","lastTransitionTime":"2025-11-22T04:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:47 crc kubenswrapper[4948]: E1122 04:47:47.596811 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.600969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.601193 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.601282 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.601421 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.601541 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:47Z","lastTransitionTime":"2025-11-22T04:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.620135 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.620178 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.620191 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.620211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.620226 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:47Z","lastTransitionTime":"2025-11-22T04:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.639560 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.639598 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.639607 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.639621 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.639631 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:47Z","lastTransitionTime":"2025-11-22T04:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:47Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:47 crc kubenswrapper[4948]: E1122 04:47:47.652076 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.653713 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.653783 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.653798 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.653841 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.653855 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:47Z","lastTransitionTime":"2025-11-22T04:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.756525 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.756562 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.756573 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.756587 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.756597 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:47Z","lastTransitionTime":"2025-11-22T04:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.757230 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.757234 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.757360 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:47 crc kubenswrapper[4948]: E1122 04:47:47.757453 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.757560 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:47 crc kubenswrapper[4948]: E1122 04:47:47.757689 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:47 crc kubenswrapper[4948]: E1122 04:47:47.757780 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:47 crc kubenswrapper[4948]: E1122 04:47:47.757881 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.859593 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.859643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.859656 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.859675 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.859688 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:47Z","lastTransitionTime":"2025-11-22T04:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.962686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.962936 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.963013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.963095 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:47 crc kubenswrapper[4948]: I1122 04:47:47.963195 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:47Z","lastTransitionTime":"2025-11-22T04:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.065791 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.066042 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.066104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.066163 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.066219 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.169069 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.169112 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.169123 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.169145 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.169157 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.271536 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.271577 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.271586 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.271600 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.271608 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.373849 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.373886 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.373895 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.373910 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.373921 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.475665 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.475690 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.475698 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.475710 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.475720 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.578742 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.578780 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.578791 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.578808 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.578821 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.682738 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.682792 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.682810 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.682834 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.682850 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.786445 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.786527 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.786543 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.786557 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.786568 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.889134 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.889203 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.889226 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.889251 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.889269 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.991224 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.991280 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.991296 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.991318 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:48 crc kubenswrapper[4948]: I1122 04:47:48.991336 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:48Z","lastTransitionTime":"2025-11-22T04:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.093789 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.093836 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.093854 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.093877 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.093895 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:49Z","lastTransitionTime":"2025-11-22T04:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.195493 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.195524 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.195535 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.195550 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.195559 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:49Z","lastTransitionTime":"2025-11-22T04:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.297811 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.297841 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.297851 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.297866 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.297876 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:49Z","lastTransitionTime":"2025-11-22T04:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.345005 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/0.log" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.345045 4948 generic.go:334] "Generic (PLEG): container finished" podID="7a2e6333-2885-4eaf-a4b3-6613127e6375" containerID="52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e" exitCode=1 Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.345069 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mw95l" event={"ID":"7a2e6333-2885-4eaf-a4b3-6613127e6375","Type":"ContainerDied","Data":"52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.345615 4948 scope.go:117] "RemoveContainer" containerID="52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.358200 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.368246 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.381380 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.392675 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.400221 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.400263 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.400280 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.400303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.400319 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:49Z","lastTransitionTime":"2025-11-22T04:47:49Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.407790 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04
:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.423120 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.441799 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.460633 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.480822 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.494704 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.502766 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.502810 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.502827 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.502853 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.502870 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:49Z","lastTransitionTime":"2025-11-22T04:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.517024 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:36Z\\\",\\\"message\\\":\\\"ller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 04:47:36.431052 6693 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 04:47:36.431067 6693 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:36.431105 6693 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:36.431123 6693 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:36.431153 6693 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:36.431158 6693 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:36.431193 6693 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:36.431224 6693 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:36.431225 6693 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:36.431220 6693 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:36.431242 6693 factory.go:656] Stopping watch factory\\\\nI1122 04:47:36.431243 6693 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:36.431256 6693 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:36.431261 6693 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:36.431266 6693 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.534527 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"
ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\
\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"
,\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.545785 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97
aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.562155 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.573555 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.586033 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.599092 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:48Z\\\",\\\"message\\\":\\\"2025-11-22T04:47:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8\\\\n2025-11-22T04:47:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8 to /host/opt/cni/bin/\\\\n2025-11-22T04:47:03Z [verbose] multus-daemon 
started\\\\n2025-11-22T04:47:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T04:47:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.605633 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.605668 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.605682 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.605704 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.605721 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:49Z","lastTransitionTime":"2025-11-22T04:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.708064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.708102 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.708115 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.708133 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.708146 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:49Z","lastTransitionTime":"2025-11-22T04:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.757312 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.757367 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:49 crc kubenswrapper[4948]: E1122 04:47:49.757437 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.757502 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.757795 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:49 crc kubenswrapper[4948]: E1122 04:47:49.757896 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.758086 4948 scope.go:117] "RemoveContainer" containerID="658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29" Nov 22 04:47:49 crc kubenswrapper[4948]: E1122 04:47:49.758102 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:49 crc kubenswrapper[4948]: E1122 04:47:49.758236 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" Nov 22 04:47:49 crc kubenswrapper[4948]: E1122 04:47:49.758334 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.771859 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.784379 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.796451 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.810718 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.810776 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.810787 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.810804 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.810816 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:49Z","lastTransitionTime":"2025-11-22T04:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.812618 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.832602 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:
12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.853488 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.868948 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.883006 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.894680 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.905981 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.912956 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.912987 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.912998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.913013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.913027 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:49Z","lastTransitionTime":"2025-11-22T04:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.924560 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:36Z\\\",\\\"message\\\":\\\"ller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 04:47:36.431052 6693 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 04:47:36.431067 6693 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:36.431105 6693 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:36.431123 6693 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:36.431153 6693 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:36.431158 6693 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:36.431193 6693 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:36.431224 6693 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:36.431225 6693 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:36.431220 6693 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:36.431242 6693 factory.go:656] Stopping watch factory\\\\nI1122 04:47:36.431243 6693 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:36.431256 6693 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:36.431261 6693 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:36.431266 6693 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.936653 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"
ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\
\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"
,\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.945419 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97
aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.954123 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.964360 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.973046 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:49 crc kubenswrapper[4948]: I1122 04:47:49.982822 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:48Z\\\",\\\"message\\\":\\\"2025-11-22T04:47:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8\\\\n2025-11-22T04:47:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8 to /host/opt/cni/bin/\\\\n2025-11-22T04:47:03Z [verbose] multus-daemon 
started\\\\n2025-11-22T04:47:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T04:47:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:49Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.015586 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.015621 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.015632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.015647 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.015659 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.119067 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.119123 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.119161 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.119187 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.119203 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.222184 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.222233 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.222245 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.222264 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.222281 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.325115 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.325164 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.325177 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.325194 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.325205 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.351461 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/0.log" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.351646 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mw95l" event={"ID":"7a2e6333-2885-4eaf-a4b3-6613127e6375","Type":"ContainerStarted","Data":"c08f2661298f84a686c7e96715dbd6ff2b0c66ee240f3cfa1cf6a0c71769a33f"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.368316 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 
04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.384897 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.400565 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.418906 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.428647 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.428766 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.428790 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.428822 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.428844 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.438382 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.449725 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.477446 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:36Z\\\",\\\"message\\\":\\\"ller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 04:47:36.431052 6693 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 04:47:36.431067 6693 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:36.431105 6693 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:36.431123 6693 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:36.431153 6693 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:36.431158 6693 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:36.431193 6693 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:36.431224 6693 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:36.431225 6693 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:36.431220 6693 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:36.431242 6693 factory.go:656] Stopping watch factory\\\\nI1122 04:47:36.431243 6693 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:36.431256 6693 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:36.431261 6693 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:36.431266 6693 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.492269 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.505232 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay
.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] 
issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.517156 4948 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2
b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.528904 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.531097 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.531164 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.531174 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.531193 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.531204 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.541141 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.560127 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c08f2661298f84a686c7e96715dbd6ff2b0c66ee240f3cfa1cf6a0c71769a33f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:48Z\\\",\\\"message\\\":\\\"2025-11-22T04:47:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8\\\\n2025-11-22T04:47:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8 to /host/opt/cni/bin/\\\\n2025-11-22T04:47:03Z [verbose] multus-daemon started\\\\n2025-11-22T04:47:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T04:47:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.575378 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.591226 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.603066 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.612316 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:50Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.633341 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.633373 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.633382 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.633396 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.633407 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.735767 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.735815 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.735827 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.735843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.735854 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.767940 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.838516 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.838550 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.838562 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.838578 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.838589 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.940432 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.940498 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.940511 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.940526 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:50 crc kubenswrapper[4948]: I1122 04:47:50.940537 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:50Z","lastTransitionTime":"2025-11-22T04:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.042864 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.042905 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.042916 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.042931 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.043016 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.144805 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.144842 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.144850 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.144865 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.144875 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.247421 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.247476 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.247487 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.247503 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.247515 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.350013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.350088 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.350106 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.350131 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.350147 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.452824 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.452877 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.452890 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.452910 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.452922 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.556807 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.556874 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.556904 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.556945 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.556972 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.659371 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.659404 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.659413 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.659427 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.659435 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.758013 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.758086 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.758101 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.758151 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:51 crc kubenswrapper[4948]: E1122 04:47:51.758278 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:51 crc kubenswrapper[4948]: E1122 04:47:51.758429 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:51 crc kubenswrapper[4948]: E1122 04:47:51.758587 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:51 crc kubenswrapper[4948]: E1122 04:47:51.758816 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.764964 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.765048 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.765074 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.765107 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.765134 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.868579 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.868652 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.868670 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.868693 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.868711 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.970811 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.970856 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.970868 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.970886 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:51 crc kubenswrapper[4948]: I1122 04:47:51.970895 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:51Z","lastTransitionTime":"2025-11-22T04:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.074033 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.074105 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.074128 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.074152 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.074168 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:52Z","lastTransitionTime":"2025-11-22T04:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.177103 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.177145 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.177160 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.177178 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.177191 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:52Z","lastTransitionTime":"2025-11-22T04:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.279635 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.279704 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.279721 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.279750 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.279768 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:52Z","lastTransitionTime":"2025-11-22T04:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.382523 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.382560 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.382573 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.382589 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.382602 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:52Z","lastTransitionTime":"2025-11-22T04:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.484943 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.484998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.485009 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.485028 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.485041 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:52Z","lastTransitionTime":"2025-11-22T04:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.587956 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.587997 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.588007 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.588023 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.588033 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:52Z","lastTransitionTime":"2025-11-22T04:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.691044 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.691100 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.691116 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.691138 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.691155 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:52Z","lastTransitionTime":"2025-11-22T04:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.794804 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.794846 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.794855 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.794871 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.794882 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:52Z","lastTransitionTime":"2025-11-22T04:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.897613 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.897651 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.897660 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.897675 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:52 crc kubenswrapper[4948]: I1122 04:47:52.897685 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:52Z","lastTransitionTime":"2025-11-22T04:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.000053 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.000092 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.000103 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.000120 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.000132 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.101969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.102017 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.102034 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.102057 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.102073 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.204810 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.204859 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.204869 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.204885 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.204894 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.308366 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.308427 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.308445 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.308497 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.308517 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.411549 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.411613 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.411629 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.411651 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.411670 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.514879 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.514923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.514945 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.514965 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.514979 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.617279 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.617354 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.617378 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.617409 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.617433 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.720229 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.720269 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.720280 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.720295 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.720308 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.757379 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.757422 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.757387 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.757418 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:53 crc kubenswrapper[4948]: E1122 04:47:53.757541 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:53 crc kubenswrapper[4948]: E1122 04:47:53.757684 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:53 crc kubenswrapper[4948]: E1122 04:47:53.757818 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:53 crc kubenswrapper[4948]: E1122 04:47:53.757926 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.823169 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.823219 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.823236 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.823255 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.823267 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.926621 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.926682 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.926693 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.926717 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:53 crc kubenswrapper[4948]: I1122 04:47:53.926740 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:53Z","lastTransitionTime":"2025-11-22T04:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.033216 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.033287 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.033308 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.033331 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.033348 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.135609 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.135671 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.135689 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.135713 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.135730 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.238958 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.239005 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.239024 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.239043 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.239057 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.342491 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.342550 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.342567 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.342592 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.342618 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.447040 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.447091 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.447102 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.447121 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.447132 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.550564 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.550729 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.550763 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.550790 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.550824 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.654516 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.654576 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.654599 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.654628 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.654652 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.757774 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.757818 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.757835 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.757856 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.757872 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.860363 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.860410 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.860419 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.860431 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.860440 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.964081 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.964146 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.964169 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.964201 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:54 crc kubenswrapper[4948]: I1122 04:47:54.964219 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:54Z","lastTransitionTime":"2025-11-22T04:47:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.068667 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.068726 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.068744 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.068769 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.068787 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.171523 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.171631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.171654 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.171684 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.171708 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.275025 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.275093 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.275126 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.275157 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.275179 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.377988 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.378077 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.378103 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.378133 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.378153 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.481771 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.481808 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.481820 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.481837 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.481849 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.585123 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.585187 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.585204 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.585226 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.585243 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.687960 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.687992 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.688000 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.688012 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.688021 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.757840 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.757873 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:55 crc kubenswrapper[4948]: E1122 04:47:55.758128 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:55 crc kubenswrapper[4948]: E1122 04:47:55.757974 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.757994 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:55 crc kubenswrapper[4948]: E1122 04:47:55.758289 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.757923 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:55 crc kubenswrapper[4948]: E1122 04:47:55.758537 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.789927 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.789990 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.790007 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.790029 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.790047 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.892390 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.892444 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.892482 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.892505 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.892523 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.996705 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.996774 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.996793 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.996820 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:55 crc kubenswrapper[4948]: I1122 04:47:55.996838 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:55Z","lastTransitionTime":"2025-11-22T04:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.098716 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.098764 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.098780 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.098798 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.098812 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:56Z","lastTransitionTime":"2025-11-22T04:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.201810 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.201864 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.201875 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.201895 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.201910 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:56Z","lastTransitionTime":"2025-11-22T04:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.304014 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.304064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.304080 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.304096 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.304107 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:56Z","lastTransitionTime":"2025-11-22T04:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.406303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.406384 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.406407 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.406437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.406522 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:56Z","lastTransitionTime":"2025-11-22T04:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.509273 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.509339 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.509361 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.509392 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.509415 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:56Z","lastTransitionTime":"2025-11-22T04:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.612278 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.612332 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.612350 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.612379 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.612401 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:56Z","lastTransitionTime":"2025-11-22T04:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.715054 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.715096 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.715110 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.715127 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.715139 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:56Z","lastTransitionTime":"2025-11-22T04:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.817859 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.817898 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.817908 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.817920 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.817929 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:56Z","lastTransitionTime":"2025-11-22T04:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.920290 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.920341 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.920352 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.920369 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:56 crc kubenswrapper[4948]: I1122 04:47:56.920382 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:56Z","lastTransitionTime":"2025-11-22T04:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.022975 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.023015 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.023023 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.023039 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.023049 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.125836 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.125891 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.125907 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.125924 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.125934 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.229313 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.229357 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.229367 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.229384 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.229396 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.331820 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.331866 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.331877 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.331897 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.331910 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.435087 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.435150 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.435171 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.435200 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.435223 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.538616 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.538679 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.538696 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.538718 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.538737 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.641365 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.641410 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.641427 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.641451 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.641511 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.744254 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.744297 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.744311 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.744330 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.744343 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.746275 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.746334 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.746353 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.746378 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.746397 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.760755 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.760796 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.760890 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.761150 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.761324 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.761398 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.761647 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.761448 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.768570 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:57Z is after 
2025-08-24T17:21:41Z" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.773448 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.773503 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.773514 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.773528 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.773541 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.793870 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:57Z is after 
2025-08-24T17:21:41Z" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.799374 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.799690 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.799848 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.799998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.800138 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.822961 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:57Z is after 
2025-08-24T17:21:41Z" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.834329 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.834395 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.834413 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.834438 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.834458 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.858380 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:57Z is after 
2025-08-24T17:21:41Z" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.864673 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.864942 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.865197 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.865440 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.865725 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.887456 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"662538c3-26b0-4a27-a0b1-8418c7cea741\\\",\\\"systemUUID\\\":\\\"52e0db1d-3891-41f9-818e-4b9385ad1108\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:57Z is after 
2025-08-24T17:21:41Z" Nov 22 04:47:57 crc kubenswrapper[4948]: E1122 04:47:57.888144 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.890578 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.890619 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.890636 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.890660 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.890680 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.994283 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.994338 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.994355 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.994378 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:57 crc kubenswrapper[4948]: I1122 04:47:57.994395 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:57Z","lastTransitionTime":"2025-11-22T04:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.097946 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.098018 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.098039 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.098062 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.098080 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:58Z","lastTransitionTime":"2025-11-22T04:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.200206 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.200617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.200776 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.200930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.201075 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:58Z","lastTransitionTime":"2025-11-22T04:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.303634 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.303667 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.303677 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.303690 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.303699 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:58Z","lastTransitionTime":"2025-11-22T04:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.405146 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.405484 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.405598 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.405696 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.405777 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:58Z","lastTransitionTime":"2025-11-22T04:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.508523 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.508584 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.508607 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.508638 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.508660 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:58Z","lastTransitionTime":"2025-11-22T04:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.611346 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.611772 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.612007 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.612259 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.612422 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:58Z","lastTransitionTime":"2025-11-22T04:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.716057 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.716096 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.716109 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.716125 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.716138 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:58Z","lastTransitionTime":"2025-11-22T04:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.818686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.819254 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.819340 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.819417 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.819528 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:58Z","lastTransitionTime":"2025-11-22T04:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.922525 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.922606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.922919 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.923246 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:58 crc kubenswrapper[4948]: I1122 04:47:58.923277 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:58Z","lastTransitionTime":"2025-11-22T04:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.031843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.031893 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.031909 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.031932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.031949 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.134575 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.134632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.134648 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.134672 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.134690 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.238165 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.238603 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.238829 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.239053 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.239267 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.342401 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.342900 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.343105 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.343444 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.343709 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.446156 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.446222 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.446240 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.446268 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.446285 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.549393 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.549522 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.549549 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.549580 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.549603 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.652581 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.652661 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.652687 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.652737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.652764 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.754722 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.754755 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.754764 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.754776 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.754785 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.757582 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.757598 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.757694 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:47:59 crc kubenswrapper[4948]: E1122 04:47:59.757759 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
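The entries above show the kubelet stuck in the standard bootstrap loop: the container runtime reports NetworkReady=false because no CNI configuration file exists yet in /etc/kubernetes/cni/net.d/ (the network provider, OVN-Kubernetes here, has not come up), so the node stays NotReady and no pod sandboxes can be created. A minimal sketch of that kind of readiness probe, in Go with only the standard library — the function name cniReady and the standalone main are ours for illustration, not kubelet or CRI-O source:

    // cniready_sketch.go -- illustrative only: reports "not ready" until at
    // least one CNI config (*.conf, *.conflist, *.json) appears in the conf
    // dir, mirroring the "no CNI configuration file" message in this log.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func cniReady(confDir string) error {
        entries, err := os.ReadDir(confDir)
        if err != nil {
            return fmt.Errorf("reading %s: %w", confDir, err)
        }
        for _, e := range entries {
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                return nil // at least one CNI config found: network is ready
            }
        }
        return fmt.Errorf("no CNI configuration file in %s. Has your network provider started?", confDir)
    }

    func main() {
        if err := cniReady("/etc/kubernetes/cni/net.d"); err != nil {
            fmt.Println("NetworkReady=false:", err)
            return
        }
        fmt.Println("NetworkReady=true")
    }

Once the provider writes its config the same kind of check starts passing and the NodeNotReady churn stops; consistent with that, the multus container log quoted further down is waiting on exactly such a file, /host/run/multus/cni/net.d/10-ovn-kubernetes.conf.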
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:47:59 crc kubenswrapper[4948]: E1122 04:47:59.757800 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:47:59 crc kubenswrapper[4948]: E1122 04:47:59.757955 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.758069 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:47:59 crc kubenswrapper[4948]: E1122 04:47:59.758225 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.776420 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.790201 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.805150 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.815582 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.829238 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c08f2661298f84a686c7e96715dbd6ff2b0c66ee240f3cfa1cf6a0c71769a33f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:48Z\\\",\\\"message\\\":\\\"2025-11-22T04:47:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8\\\\n2025-11-22T04:47:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8 to /host/opt/cni/bin/\\\\n2025-11-22T04:47:03Z [verbose] multus-daemon started\\\\n2025-11-22T04:47:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T04:47:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.840630 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"237aebe7-ab7f-4d7e-acd0-f74079ec76e0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c8ccb7e7f0f0b46b13e82efb731c8cd06abbcf1924fe826ecb4a619efaa821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4cf0871be059cf435b4c1060a6c032c87a8c388bbd9ee2f114fc1528764c5dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4cf0871be059cf435b4c1060a6c032c87a8c388bbd9ee2f114fc1528764c5dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.854035 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.857074 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.857216 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.857304 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.857394 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.857495 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.865407 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.875447 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.892080 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.907797 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 
04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.919185 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.936910 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.949054 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.960053 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.960080 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.960088 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.960101 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.960109 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:47:59Z","lastTransitionTime":"2025-11-22T04:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.961440 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.976792 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:47:59 crc kubenswrapper[4948]: I1122 04:47:59.999611 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:36Z\\\",\\\"message\\\":\\\"ller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 04:47:36.431052 6693 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 04:47:36.431067 6693 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:36.431105 6693 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:36.431123 6693 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:36.431153 6693 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:36.431158 6693 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:36.431193 6693 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:36.431224 6693 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:36.431225 6693 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:36.431220 6693 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:36.431242 6693 factory.go:656] Stopping watch factory\\\\nI1122 04:47:36.431243 6693 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:36.431256 6693 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:36.431261 6693 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:36.431266 6693 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 04:47:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:47:59Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.019217 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:00Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.062343 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.062411 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.062433 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.062458 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.062503 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:00Z","lastTransitionTime":"2025-11-22T04:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.243216 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.243253 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.243264 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.243281 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.243292 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:00Z","lastTransitionTime":"2025-11-22T04:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.346353 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.346417 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.346430 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.346453 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.346498 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:00Z","lastTransitionTime":"2025-11-22T04:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.449061 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.449120 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.449137 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.449162 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.449181 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:00Z","lastTransitionTime":"2025-11-22T04:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.551082 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.551145 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.551166 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.551193 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.551215 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:00Z","lastTransitionTime":"2025-11-22T04:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.653857 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.653915 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.653933 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.653956 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.653974 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:00Z","lastTransitionTime":"2025-11-22T04:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.756246 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.756317 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.756351 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.756400 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.756424 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:00Z","lastTransitionTime":"2025-11-22T04:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.859799 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.859883 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.859932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.859958 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.859975 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:00Z","lastTransitionTime":"2025-11-22T04:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.962725 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.962791 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.962814 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.962842 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:00 crc kubenswrapper[4948]: I1122 04:48:00.962862 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:00Z","lastTransitionTime":"2025-11-22T04:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.067013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.067085 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.067106 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.067136 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.067160 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.169913 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.170011 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.170061 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.170084 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.170101 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.272839 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.272905 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.272923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.272948 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.272966 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.378239 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.378276 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.378341 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.378356 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.378365 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.480187 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.480232 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.480242 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.480263 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.480276 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.582572 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.582632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.582649 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.582674 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.582692 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.685595 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.685628 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.685636 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.685650 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.685675 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.757849 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.757889 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:01 crc kubenswrapper[4948]: E1122 04:48:01.757990 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.758024 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.758099 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:01 crc kubenswrapper[4948]: E1122 04:48:01.758216 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:01 crc kubenswrapper[4948]: E1122 04:48:01.758298 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:01 crc kubenswrapper[4948]: E1122 04:48:01.758370 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.759018 4948 scope.go:117] "RemoveContainer" containerID="658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.787986 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.788014 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.788023 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.788035 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.788046 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.891663 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.891735 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.891759 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.891791 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.891815 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.994574 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.994614 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.994655 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.994677 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:01 crc kubenswrapper[4948]: I1122 04:48:01.994690 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:01Z","lastTransitionTime":"2025-11-22T04:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.098648 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.098694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.098707 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.098726 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.098738 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:02Z","lastTransitionTime":"2025-11-22T04:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.201797 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.201837 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.201852 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.201872 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.201888 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:02Z","lastTransitionTime":"2025-11-22T04:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.304097 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.304136 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.304148 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.304170 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.304185 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:02Z","lastTransitionTime":"2025-11-22T04:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.392952 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/2.log" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.395768 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.396130 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.406449 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.406505 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.406518 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.406534 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.406561 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:02Z","lastTransitionTime":"2025-11-22T04:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.408173 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a9d19e6eab2bec320d7a53200cabe1acd7761573bd5e04b33a43358aaa5c918\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.420827 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42ab8dc1-4daa-4eef-b2c7-9ffa774b8411\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0701f5e336fabcd66d2c34e9f1100e8dfcbcbf35fd12a988ff622ef1932f5a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ef52de50ad9c32e9e6201ace1eed4500ab6c33382cd3fc4ef7f6b2b0e6e0e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2l2pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7sg6l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 
04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.431290 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-btkdx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9a35ebfd-12d4-4129-9c61-9d5880130fa0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-b52vx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-btkdx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.448404 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad9f1eb0-86db-47df-85ea-ac21bccdff89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108b9349f773d7f1727fc93abc369322206989dac63f38e671e4f7daf4ee04fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ea61dfb4eea750435841c61fa63597d7a70ef176f2abc57a639244c0cf42f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d943a30bda8fe788faf92972601d8b8bdf590047fe1555b8675253b1c6ac0c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2e4e35cd6f7be75dc2f0a54e49240992f9353954c64b10bc10f1f94a70f3b91\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4354bf3f8046926733c1d9b98e40771cb889889387763f7f3896879d6637c6ea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"122 04:46:59.910248 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910276 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 04:46:59.910316 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 04:46:59.910402 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 04:46:59.910587 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\"\\\\nI1122 04:46:59.910991 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2141664254/tls.crt::/tmp/serving-cert-2141664254/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1763786804\\\\\\\\\\\\\\\" (2025-11-22 04:46:43 +0000 UTC to 2025-12-22 04:46:44 +0000 UTC (now=2025-11-22 04:46:59.909695995 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911349 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763786814\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763786814\\\\\\\\\\\\\\\" (2025-11-22 03:46:54 +0000 UTC to 2026-11-22 03:46:54 +0000 UTC (now=2025-11-22 04:46:59.91130003 +0000 UTC))\\\\\\\"\\\\nI1122 04:46:59.911395 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 04:46:59.911439 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 04:46:59.911449 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 04:46:59.914085 1 reflector.go:368] Caches populated for *v1.ConfigMap from k8s.io/client-go@v0.31.1/tools/cache/reflector.go:243\\\\nF1122 04:46:59.914348 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:44Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe04d3ce72e6d80aa2778a2dd2c171197cd3fd70624a2b192ee2305de4fecb67\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9422aa8fd896c8da3499888ecc8d3ff614d23d2511ef6c947a81096d2964b251\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.464943 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72367b430296a39b89e2458220e1e5295d9e622acaf3a6578f07e384f9ff9dd6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.476357 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://485118adc307cd10132869afff8cf76573b6e25e881e8c1843d72d8a35350d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8051a324a4c2cb5144536e9c6f5ba67196c63e97ffeb85c8fe1c0f03c5f3b1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.485285 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xfvlb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a357c8ff-8016-42b1-80b6-1ead105abc52\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88bfcb51d09066f68507e6a543bd9e5104fe1afe8ae7f20c1449710071160251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nlrsd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xfvlb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.501180 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bad3107e-91a9-463d-b981-fb102616bdbe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:36Z\\\",\\\"message\\\":\\\"ller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 04:47:36.431052 6693 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 04:47:36.431067 6693 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 04:47:36.431105 6693 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 04:47:36.431123 6693 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 04:47:36.431153 6693 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 04:47:36.431158 6693 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 04:47:36.431193 6693 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 04:47:36.431224 6693 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 04:47:36.431225 6693 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 04:47:36.431220 6693 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 04:47:36.431242 6693 factory.go:656] Stopping watch factory\\\\nI1122 04:47:36.431243 6693 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 04:47:36.431256 6693 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 04:47:36.431261 6693 ovnkube.go:599] Stopped ovnkube\\\\nI1122 04:47:36.431266 6693 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 
04:47:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:48:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hrcwm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bspvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.508242 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.508267 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.508275 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.508287 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.508295 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:02Z","lastTransitionTime":"2025-11-22T04:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.514777 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"720ed0a4-d93b-4f64-88f7-dfd7b218adc4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4206e8e3268d5118b26c172a4fbd2a6d61ee0fcdcd8d7730dfb629d33c80a436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a7017e7e3ff5f23e80285ccecebcc6cd9a02ef81c577f3e048bcb8f3b25bd843\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59d229178fda8225f37bd8fc2a10133cc3807aca2bd91c8ea174427c73e253fb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dad216fda6e0c8242f0ba526ccc588f3fa512c081b56204e5f0d970f5a192ff3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e45266235f4a287540d98c1285fe31f88718316296788a3838c6cce40e7d2247\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3db4f23217e7f042a935fed1160748530c835c5cdb9f66ec9190f69cb76f3d17\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16b4835619ac6429eb12d43c43f12f218e6f99b9f4fd17d4f80406c47a792bd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9vkzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kx6rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.526041 4948 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"195fec03-cc3e-47ce-8bc5-0198069035df\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c24b2272f578fd637b361a618d34f66c84fdf6134f400b6ebfb166c2a102844f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d1fe94656aea97393a111f01b0afe628731ce5333694536f11d99b2cb287417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28944eab04a79c5062e1dae72e3eee905c5529f51b1a6f14745923fc275d48a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f5380479bf211ac39a42d348ce29a37f6c0b902a8e4c637a0540a505f458a87\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.537949 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80968630-7f3e-452e-b3b1-7701f236c14a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcac055b9e8a9c18a8a85c2dd5431ac15347b51754abeff9cdd7674fc37fdc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76179e4c6c703cf50cf3e05e4efba308e345ee95a83028cfb7587c5ea4306779\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9be15cc0fbe7648e16e5fc614849400dd5f20f7ace63f67fc2904e4ea9790867\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a473315cfc4ee5a97453dbbc6e29e93f4a59b7a420844e6de2b54dc59af8f15d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.550867 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.564541 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126f010b-a640-4133-b63f-d2976da99215\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0fdb7f218d7e4a0beb2e5141b05c250b75a0c1f69381fbf53b5fe441d8d75a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-phk4m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pf8gx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.578735 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-mw95l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a2e6333-2885-4eaf-a4b3-6613127e6375\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c08f2661298f84a686c7e96715dbd6ff2b0c66ee240f3cfa1cf6a0c71769a33f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T04:47:48Z\\\",\\\"message\\\":\\\"2025-11-22T04:47:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8\\\\n2025-11-22T04:47:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_97ab9d2b-69c4-44c6-8c41-42e31d24e3e8 to /host/opt/cni/bin/\\\\n2025-11-22T04:47:03Z [verbose] multus-daemon started\\\\n2025-11-22T04:47:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T04:47:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T04:47:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnwvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mw95l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.589976 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"237aebe7-ab7f-4d7e-acd0-f74079ec76e0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://65c8ccb7e7f0f0b46b13e82efb731c8cd06abbcf1924fe826ecb4a619efaa821\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:46:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4cf0871be059cf435b4c1060a6c032c87a8c388bbd9ee2f114fc1528764c5dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4cf0871be059cf435b4c1060a6c032c87a8c388bbd9ee2f114fc1528764c5dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T04:46:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T04:46:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:46:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.603654 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.609870 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.609904 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.609912 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.609925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.609933 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:02Z","lastTransitionTime":"2025-11-22T04:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.615566 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T04:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.623525 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vthsw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad55002c-24cf-45f1-b251-f69c822a8d87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T04:47:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a82e307c346e296b35eaf141160650e693091618bcab3b00e964f810923053a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T04:47:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zvsd6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T04:47:02Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vthsw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T04:48:02Z is after 2025-08-24T17:21:41Z" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.712542 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.712575 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.712586 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.712602 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.712614 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:02Z","lastTransitionTime":"2025-11-22T04:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.815808 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.815861 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.815875 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.815893 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.815906 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:02Z","lastTransitionTime":"2025-11-22T04:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.918355 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.918398 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.918421 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.918444 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:02 crc kubenswrapper[4948]: I1122 04:48:02.918581 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:02Z","lastTransitionTime":"2025-11-22T04:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.021450 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.021561 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.021586 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.021614 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.021637 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.126672 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.126723 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.126739 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.126762 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.126778 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.228786 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.228824 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.228836 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.228852 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.228865 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.331420 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.331518 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.331542 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.331565 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.331581 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.401677 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/3.log"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.402742 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/2.log"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.406534 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.406652 4948 scope.go:117] "RemoveContainer" containerID="658709ec66ad8fbc49090269a030063d7689eaae173d7a17bcbc53c7df928a29"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.407755 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.408281 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.406294 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500" exitCode=1
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.435772 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.435824 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.435841 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.435860 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.435875 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.538647 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.538692 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.538705 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.538725 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.538738 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.548802 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7sg6l" podStartSLOduration=63.548775232 podStartE2EDuration="1m3.548775232s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.548614737 +0000 UTC m=+86.234625263" watchObservedRunningTime="2025-11-22 04:48:03.548775232 +0000 UTC m=+86.234785778"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.577360 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.577583 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.577545083 +0000 UTC m=+150.263555609 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.577840 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.578010 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.578081 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.578063927 +0000 UTC m=+150.264074453 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.589998 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-xfvlb" podStartSLOduration=64.589970583 podStartE2EDuration="1m4.589970583s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.5898634 +0000 UTC m=+86.275873926" watchObservedRunningTime="2025-11-22 04:48:03.589970583 +0000 UTC m=+86.275981099"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.628819 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-kx6rn" podStartSLOduration=64.628800947 podStartE2EDuration="1m4.628800947s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.628071157 +0000 UTC m=+86.314081673" watchObservedRunningTime="2025-11-22 04:48:03.628800947 +0000 UTC m=+86.314811473"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.641626 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.641663 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.641672 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.641686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.641696 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.643788 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=63.643770209 podStartE2EDuration="1m3.643770209s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.643205953 +0000 UTC m=+86.329216469" watchObservedRunningTime="2025-11-22 04:48:03.643770209 +0000 UTC m=+86.329780725"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.679126 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.679205 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.679242 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679322 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679419 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.679399363 +0000 UTC m=+150.365409879 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679449 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679485 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679545 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679563 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679497 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679623 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.679603499 +0000 UTC m=+150.365614015 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679634 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.679676 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.679664891 +0000 UTC m=+150.365675617 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.691641 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=62.691616508 podStartE2EDuration="1m2.691616508s" podCreationTimestamp="2025-11-22 04:47:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.691133914 +0000 UTC m=+86.377144420" watchObservedRunningTime="2025-11-22 04:48:03.691616508 +0000 UTC m=+86.377627024"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.741346 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podStartSLOduration=64.741319939 podStartE2EDuration="1m4.741319939s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.726950264 +0000 UTC m=+86.412960780" watchObservedRunningTime="2025-11-22 04:48:03.741319939 +0000 UTC m=+86.427330455"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.741448 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-mw95l" podStartSLOduration=64.741442812 podStartE2EDuration="1m4.741442812s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.740974939 +0000 UTC m=+86.426985465" watchObservedRunningTime="2025-11-22 04:48:03.741442812 +0000 UTC m=+86.427453338"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.743534 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.743572 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.743585 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.743603 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.743616 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.753788 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=37.75376722 podStartE2EDuration="37.75376722s" podCreationTimestamp="2025-11-22 04:47:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.753672427 +0000 UTC m=+86.439682953" watchObservedRunningTime="2025-11-22 04:48:03.75376722 +0000 UTC m=+86.439777746"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.757662 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.757722 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.757745 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.757778 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.757866 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.757926 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.757959 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:48:03 crc kubenswrapper[4948]: E1122 04:48:03.758040 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.780684 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-vthsw" podStartSLOduration=64.780667518 podStartE2EDuration="1m4.780667518s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.780010899 +0000 UTC m=+86.466021415" watchObservedRunningTime="2025-11-22 04:48:03.780667518 +0000 UTC m=+86.466678034"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.790231 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=13.790219167 podStartE2EDuration="13.790219167s" podCreationTimestamp="2025-11-22 04:47:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:03.790144895 +0000 UTC m=+86.476155421" watchObservedRunningTime="2025-11-22 04:48:03.790219167 +0000 UTC m=+86.476229683"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.845899 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.845927 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.845934 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.845947 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.845956 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.948338 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.948393 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.948412 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.948445 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:03 crc kubenswrapper[4948]: I1122 04:48:03.948483 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:03Z","lastTransitionTime":"2025-11-22T04:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.051681 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.051742 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.051753 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.051776 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.051793 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.155358 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.155415 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.155431 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.155452 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.155504 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.258708 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.258759 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.258774 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.258796 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.258811 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.362190 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.362247 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.362262 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.362284 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.362300 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.414606 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/3.log"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.419574 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"
Nov 22 04:48:04 crc kubenswrapper[4948]: E1122 04:48:04.419885 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.465757 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.465804 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.465815 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.465835 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.465846 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.568382 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.568448 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.568756 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.569060 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.569123 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.673007 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.673078 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.673099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.673124 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.673144 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.777121 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.777409 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.777422 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.777441 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.777453 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.880654 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.880702 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.880719 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.880737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.880748 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.983966 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.984017 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.984034 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.984057 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:04 crc kubenswrapper[4948]: I1122 04:48:04.984077 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:04Z","lastTransitionTime":"2025-11-22T04:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.086670 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.086704 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.086716 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.086733 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.086745 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:05Z","lastTransitionTime":"2025-11-22T04:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.188412 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.188453 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.188479 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.188496 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.188507 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:05Z","lastTransitionTime":"2025-11-22T04:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.290836 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.290870 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.290879 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.290891 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.290900 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:05Z","lastTransitionTime":"2025-11-22T04:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.393611 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.393668 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.393686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.393712 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.393731 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:05Z","lastTransitionTime":"2025-11-22T04:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.496838 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.496902 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.496927 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.496952 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.496970 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:05Z","lastTransitionTime":"2025-11-22T04:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.600058 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.600121 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.600139 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.600163 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.600181 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:05Z","lastTransitionTime":"2025-11-22T04:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.703417 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.703528 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.703553 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.703585 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.703610 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:05Z","lastTransitionTime":"2025-11-22T04:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.757881 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.757923 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.757953 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.757923 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 04:48:05 crc kubenswrapper[4948]: E1122 04:48:05.758119 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 04:48:05 crc kubenswrapper[4948]: E1122 04:48:05.758257 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0"
Nov 22 04:48:05 crc kubenswrapper[4948]: E1122 04:48:05.758397 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 04:48:05 crc kubenswrapper[4948]: E1122 04:48:05.758571 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.806774 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.806832 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.806857 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.806881 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.806895 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:05Z","lastTransitionTime":"2025-11-22T04:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.909430 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.909517 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.909537 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.909565 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:05 crc kubenswrapper[4948]: I1122 04:48:05.909583 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:05Z","lastTransitionTime":"2025-11-22T04:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.012900 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.012941 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.012949 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.012963 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.012973 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.115953 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.116018 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.116034 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.116059 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.116075 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.219654 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.219786 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.219809 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.219838 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.219859 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.322972 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.323056 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.323082 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.323113 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.323137 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.425630 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.425697 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.425732 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.425769 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.425799 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.529115 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.529182 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.529198 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.529223 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.529239 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.632657 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.632707 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.632717 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.632731 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.632741 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.735362 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.735395 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.735418 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.735436 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.735445 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.780954 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.838971 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.839040 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.839060 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.839088 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.839109 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.942313 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.942377 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.942398 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.942425 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:06 crc kubenswrapper[4948]: I1122 04:48:06.942448 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:06Z","lastTransitionTime":"2025-11-22T04:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.045617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.045706 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.045728 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.045759 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.045783 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.148842 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.148908 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.148925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.148948 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.148965 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.252289 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.252384 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.252409 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.252437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.252454 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.356399 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.356506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.356525 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.356548 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.356566 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.459628 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.459690 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.459710 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.459736 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.459756 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.562723 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.562784 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.562808 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.562830 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.562842 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.665148 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.665191 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.665201 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.665217 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.665225 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.758204 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.758210 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.758269 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.758333 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:07 crc kubenswrapper[4948]: E1122 04:48:07.758554 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:07 crc kubenswrapper[4948]: E1122 04:48:07.758711 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:07 crc kubenswrapper[4948]: E1122 04:48:07.758801 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:07 crc kubenswrapper[4948]: E1122 04:48:07.758853 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.767801 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.767843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.767855 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.767870 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.767882 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.869857 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.869909 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.869922 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.869942 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.869955 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.972372 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.972434 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.972454 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.972504 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:07 crc kubenswrapper[4948]: I1122 04:48:07.972522 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:07Z","lastTransitionTime":"2025-11-22T04:48:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.075270 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.075326 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.075337 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.075361 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.075375 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:08Z","lastTransitionTime":"2025-11-22T04:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.076917 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.076971 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.076983 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.077000 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.077011 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T04:48:08Z","lastTransitionTime":"2025-11-22T04:48:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.124484 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h"] Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.124859 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.126716 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.126980 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.127066 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.128516 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.164274 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=2.16425894 podStartE2EDuration="2.16425894s" podCreationTimestamp="2025-11-22 04:48:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:08.163180989 +0000 UTC m=+90.849191505" watchObservedRunningTime="2025-11-22 04:48:08.16425894 +0000 UTC m=+90.850269446" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.231789 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/16699487-5e67-4198-9260-7000f156e047-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.231902 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/16699487-5e67-4198-9260-7000f156e047-service-ca\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.231957 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/16699487-5e67-4198-9260-7000f156e047-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.232012 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16699487-5e67-4198-9260-7000f156e047-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 
crc kubenswrapper[4948]: I1122 04:48:08.232089 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/16699487-5e67-4198-9260-7000f156e047-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.332935 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/16699487-5e67-4198-9260-7000f156e047-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.333047 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/16699487-5e67-4198-9260-7000f156e047-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.333102 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/16699487-5e67-4198-9260-7000f156e047-service-ca\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.333137 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/16699487-5e67-4198-9260-7000f156e047-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.333176 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16699487-5e67-4198-9260-7000f156e047-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.333280 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/16699487-5e67-4198-9260-7000f156e047-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.333382 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/16699487-5e67-4198-9260-7000f156e047-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.334173 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/16699487-5e67-4198-9260-7000f156e047-service-ca\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.347305 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16699487-5e67-4198-9260-7000f156e047-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.358091 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/16699487-5e67-4198-9260-7000f156e047-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-cbc9h\" (UID: \"16699487-5e67-4198-9260-7000f156e047\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: I1122 04:48:08.451131 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" Nov 22 04:48:08 crc kubenswrapper[4948]: W1122 04:48:08.470797 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16699487_5e67_4198_9260_7000f156e047.slice/crio-62cd669a88390e041e51a4017f3ae5d73b2fdbba202fde9cc41935bd4d526ba2 WatchSource:0}: Error finding container 62cd669a88390e041e51a4017f3ae5d73b2fdbba202fde9cc41935bd4d526ba2: Status 404 returned error can't find the container with id 62cd669a88390e041e51a4017f3ae5d73b2fdbba202fde9cc41935bd4d526ba2 Nov 22 04:48:09 crc kubenswrapper[4948]: I1122 04:48:09.437997 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" event={"ID":"16699487-5e67-4198-9260-7000f156e047","Type":"ContainerStarted","Data":"715bf173041d870834ea706ee9dff7125fd2801922a7bf9caebbe5cdf803a926"} Nov 22 04:48:09 crc kubenswrapper[4948]: I1122 04:48:09.438443 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" event={"ID":"16699487-5e67-4198-9260-7000f156e047","Type":"ContainerStarted","Data":"62cd669a88390e041e51a4017f3ae5d73b2fdbba202fde9cc41935bd4d526ba2"} Nov 22 04:48:09 crc kubenswrapper[4948]: I1122 04:48:09.459025 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-cbc9h" podStartSLOduration=70.459002333 podStartE2EDuration="1m10.459002333s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:09.457285664 +0000 UTC m=+92.143296210" watchObservedRunningTime="2025-11-22 04:48:09.459002333 +0000 UTC m=+92.145012889" Nov 22 04:48:09 crc kubenswrapper[4948]: I1122 04:48:09.757343 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:09 crc kubenswrapper[4948]: I1122 04:48:09.757401 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:09 crc kubenswrapper[4948]: I1122 04:48:09.757400 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:09 crc kubenswrapper[4948]: I1122 04:48:09.757564 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:09 crc kubenswrapper[4948]: E1122 04:48:09.758981 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:09 crc kubenswrapper[4948]: E1122 04:48:09.759289 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:09 crc kubenswrapper[4948]: E1122 04:48:09.759333 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:09 crc kubenswrapper[4948]: E1122 04:48:09.759412 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:11 crc kubenswrapper[4948]: I1122 04:48:11.757415 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:11 crc kubenswrapper[4948]: I1122 04:48:11.757554 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:11 crc kubenswrapper[4948]: I1122 04:48:11.757569 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:11 crc kubenswrapper[4948]: I1122 04:48:11.757681 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:11 crc kubenswrapper[4948]: E1122 04:48:11.757818 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:11 crc kubenswrapper[4948]: E1122 04:48:11.757933 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:11 crc kubenswrapper[4948]: E1122 04:48:11.757988 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:11 crc kubenswrapper[4948]: E1122 04:48:11.758041 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:13 crc kubenswrapper[4948]: I1122 04:48:13.757598 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:13 crc kubenswrapper[4948]: I1122 04:48:13.757629 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:13 crc kubenswrapper[4948]: I1122 04:48:13.757670 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:13 crc kubenswrapper[4948]: E1122 04:48:13.757749 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:13 crc kubenswrapper[4948]: E1122 04:48:13.757836 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:13 crc kubenswrapper[4948]: I1122 04:48:13.757900 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:13 crc kubenswrapper[4948]: E1122 04:48:13.758040 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:13 crc kubenswrapper[4948]: E1122 04:48:13.758110 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:15 crc kubenswrapper[4948]: I1122 04:48:15.757978 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:15 crc kubenswrapper[4948]: E1122 04:48:15.758324 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:15 crc kubenswrapper[4948]: I1122 04:48:15.758360 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:15 crc kubenswrapper[4948]: I1122 04:48:15.758374 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:15 crc kubenswrapper[4948]: E1122 04:48:15.758534 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:15 crc kubenswrapper[4948]: I1122 04:48:15.758596 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:15 crc kubenswrapper[4948]: E1122 04:48:15.758674 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:15 crc kubenswrapper[4948]: E1122 04:48:15.758720 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:15 crc kubenswrapper[4948]: I1122 04:48:15.759404 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500" Nov 22 04:48:15 crc kubenswrapper[4948]: E1122 04:48:15.759628 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" Nov 22 04:48:17 crc kubenswrapper[4948]: I1122 04:48:17.757088 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:17 crc kubenswrapper[4948]: E1122 04:48:17.757890 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:17 crc kubenswrapper[4948]: I1122 04:48:17.757154 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:17 crc kubenswrapper[4948]: I1122 04:48:17.757094 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:17 crc kubenswrapper[4948]: E1122 04:48:17.758234 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:17 crc kubenswrapper[4948]: I1122 04:48:17.757161 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:17 crc kubenswrapper[4948]: E1122 04:48:17.758496 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:17 crc kubenswrapper[4948]: E1122 04:48:17.758615 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:18 crc kubenswrapper[4948]: I1122 04:48:18.158652 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:18 crc kubenswrapper[4948]: E1122 04:48:18.158848 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:48:18 crc kubenswrapper[4948]: E1122 04:48:18.158929 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs podName:9a35ebfd-12d4-4129-9c61-9d5880130fa0 nodeName:}" failed. No retries permitted until 2025-11-22 04:49:22.158909632 +0000 UTC m=+164.844920158 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs") pod "network-metrics-daemon-btkdx" (UID: "9a35ebfd-12d4-4129-9c61-9d5880130fa0") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 04:48:19 crc kubenswrapper[4948]: I1122 04:48:19.759671 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:19 crc kubenswrapper[4948]: I1122 04:48:19.759704 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:19 crc kubenswrapper[4948]: I1122 04:48:19.759812 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:19 crc kubenswrapper[4948]: E1122 04:48:19.759953 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:19 crc kubenswrapper[4948]: I1122 04:48:19.759993 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:19 crc kubenswrapper[4948]: E1122 04:48:19.760139 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:19 crc kubenswrapper[4948]: E1122 04:48:19.760273 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:19 crc kubenswrapper[4948]: E1122 04:48:19.760354 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:21 crc kubenswrapper[4948]: I1122 04:48:21.758210 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:21 crc kubenswrapper[4948]: I1122 04:48:21.758299 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:21 crc kubenswrapper[4948]: I1122 04:48:21.758326 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:21 crc kubenswrapper[4948]: I1122 04:48:21.758219 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:21 crc kubenswrapper[4948]: E1122 04:48:21.758459 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:21 crc kubenswrapper[4948]: E1122 04:48:21.758618 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:21 crc kubenswrapper[4948]: E1122 04:48:21.758726 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:21 crc kubenswrapper[4948]: E1122 04:48:21.758827 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:23 crc kubenswrapper[4948]: I1122 04:48:23.757427 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:23 crc kubenswrapper[4948]: E1122 04:48:23.757592 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:23 crc kubenswrapper[4948]: I1122 04:48:23.757709 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:23 crc kubenswrapper[4948]: I1122 04:48:23.757763 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:23 crc kubenswrapper[4948]: E1122 04:48:23.757999 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:23 crc kubenswrapper[4948]: E1122 04:48:23.758053 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:23 crc kubenswrapper[4948]: I1122 04:48:23.758565 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:23 crc kubenswrapper[4948]: E1122 04:48:23.758818 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:25 crc kubenswrapper[4948]: I1122 04:48:25.757282 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:25 crc kubenswrapper[4948]: E1122 04:48:25.758067 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:25 crc kubenswrapper[4948]: I1122 04:48:25.757325 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:25 crc kubenswrapper[4948]: E1122 04:48:25.758256 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:25 crc kubenswrapper[4948]: I1122 04:48:25.757302 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:25 crc kubenswrapper[4948]: E1122 04:48:25.758519 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:25 crc kubenswrapper[4948]: I1122 04:48:25.757411 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:25 crc kubenswrapper[4948]: E1122 04:48:25.758704 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:27 crc kubenswrapper[4948]: I1122 04:48:27.757654 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:27 crc kubenswrapper[4948]: I1122 04:48:27.757657 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:27 crc kubenswrapper[4948]: I1122 04:48:27.758582 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:27 crc kubenswrapper[4948]: I1122 04:48:27.758849 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:27 crc kubenswrapper[4948]: E1122 04:48:27.759083 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:27 crc kubenswrapper[4948]: E1122 04:48:27.758989 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:27 crc kubenswrapper[4948]: E1122 04:48:27.758857 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:27 crc kubenswrapper[4948]: E1122 04:48:27.759444 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:29 crc kubenswrapper[4948]: I1122 04:48:29.757633 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:29 crc kubenswrapper[4948]: I1122 04:48:29.757733 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:29 crc kubenswrapper[4948]: E1122 04:48:29.760028 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:29 crc kubenswrapper[4948]: I1122 04:48:29.760086 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:29 crc kubenswrapper[4948]: I1122 04:48:29.760133 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:29 crc kubenswrapper[4948]: E1122 04:48:29.760619 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:29 crc kubenswrapper[4948]: E1122 04:48:29.760752 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:29 crc kubenswrapper[4948]: E1122 04:48:29.760878 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:29 crc kubenswrapper[4948]: I1122 04:48:29.761286 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500" Nov 22 04:48:29 crc kubenswrapper[4948]: E1122 04:48:29.761635 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-bspvz_openshift-ovn-kubernetes(bad3107e-91a9-463d-b981-fb102616bdbe)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" Nov 22 04:48:31 crc kubenswrapper[4948]: I1122 04:48:31.757660 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:31 crc kubenswrapper[4948]: I1122 04:48:31.757698 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:31 crc kubenswrapper[4948]: I1122 04:48:31.757806 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:31 crc kubenswrapper[4948]: I1122 04:48:31.757845 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:31 crc kubenswrapper[4948]: E1122 04:48:31.758050 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:31 crc kubenswrapper[4948]: E1122 04:48:31.758151 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:31 crc kubenswrapper[4948]: E1122 04:48:31.758388 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:31 crc kubenswrapper[4948]: E1122 04:48:31.758763 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:33 crc kubenswrapper[4948]: I1122 04:48:33.757385 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:33 crc kubenswrapper[4948]: I1122 04:48:33.757536 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:33 crc kubenswrapper[4948]: E1122 04:48:33.757599 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:33 crc kubenswrapper[4948]: I1122 04:48:33.757647 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:33 crc kubenswrapper[4948]: I1122 04:48:33.757704 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:33 crc kubenswrapper[4948]: E1122 04:48:33.757746 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:33 crc kubenswrapper[4948]: E1122 04:48:33.757963 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:33 crc kubenswrapper[4948]: E1122 04:48:33.758288 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.594577 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/1.log" Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.598066 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/0.log" Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.598149 4948 generic.go:334] "Generic (PLEG): container finished" podID="7a2e6333-2885-4eaf-a4b3-6613127e6375" containerID="c08f2661298f84a686c7e96715dbd6ff2b0c66ee240f3cfa1cf6a0c71769a33f" exitCode=1 Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.598199 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mw95l" event={"ID":"7a2e6333-2885-4eaf-a4b3-6613127e6375","Type":"ContainerDied","Data":"c08f2661298f84a686c7e96715dbd6ff2b0c66ee240f3cfa1cf6a0c71769a33f"} Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.598255 4948 scope.go:117] "RemoveContainer" containerID="52bfe151397ef9c2b570c38b87a0bec657eb47b87cedc0128cde9c7ea41c788e" Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.598976 4948 scope.go:117] "RemoveContainer" containerID="c08f2661298f84a686c7e96715dbd6ff2b0c66ee240f3cfa1cf6a0c71769a33f" Nov 22 04:48:35 crc kubenswrapper[4948]: E1122 04:48:35.599289 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-mw95l_openshift-multus(7a2e6333-2885-4eaf-a4b3-6613127e6375)\"" pod="openshift-multus/multus-mw95l" podUID="7a2e6333-2885-4eaf-a4b3-6613127e6375" Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.757514 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.757517 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.757661 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:35 crc kubenswrapper[4948]: E1122 04:48:35.757713 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:35 crc kubenswrapper[4948]: I1122 04:48:35.757528 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:35 crc kubenswrapper[4948]: E1122 04:48:35.757847 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:35 crc kubenswrapper[4948]: E1122 04:48:35.757946 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:35 crc kubenswrapper[4948]: E1122 04:48:35.758094 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:36 crc kubenswrapper[4948]: I1122 04:48:36.609869 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/1.log" Nov 22 04:48:37 crc kubenswrapper[4948]: I1122 04:48:37.757065 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:37 crc kubenswrapper[4948]: I1122 04:48:37.757155 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:37 crc kubenswrapper[4948]: I1122 04:48:37.757073 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:37 crc kubenswrapper[4948]: I1122 04:48:37.757080 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:37 crc kubenswrapper[4948]: E1122 04:48:37.757583 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:37 crc kubenswrapper[4948]: E1122 04:48:37.757652 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:37 crc kubenswrapper[4948]: E1122 04:48:37.757918 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:37 crc kubenswrapper[4948]: E1122 04:48:37.758073 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:39 crc kubenswrapper[4948]: E1122 04:48:39.720951 4948 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 22 04:48:39 crc kubenswrapper[4948]: I1122 04:48:39.758786 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:39 crc kubenswrapper[4948]: I1122 04:48:39.758907 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:39 crc kubenswrapper[4948]: I1122 04:48:39.758910 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:39 crc kubenswrapper[4948]: I1122 04:48:39.760015 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:39 crc kubenswrapper[4948]: E1122 04:48:39.759997 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:39 crc kubenswrapper[4948]: E1122 04:48:39.760210 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:39 crc kubenswrapper[4948]: E1122 04:48:39.760500 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:39 crc kubenswrapper[4948]: E1122 04:48:39.760660 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:39 crc kubenswrapper[4948]: E1122 04:48:39.844033 4948 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 04:48:41 crc kubenswrapper[4948]: I1122 04:48:41.757398 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:41 crc kubenswrapper[4948]: I1122 04:48:41.757575 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:41 crc kubenswrapper[4948]: I1122 04:48:41.757604 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:41 crc kubenswrapper[4948]: I1122 04:48:41.757656 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:41 crc kubenswrapper[4948]: E1122 04:48:41.758192 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:41 crc kubenswrapper[4948]: E1122 04:48:41.758347 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:41 crc kubenswrapper[4948]: E1122 04:48:41.758551 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:41 crc kubenswrapper[4948]: E1122 04:48:41.758071 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:43 crc kubenswrapper[4948]: I1122 04:48:43.757372 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:43 crc kubenswrapper[4948]: I1122 04:48:43.757684 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:43 crc kubenswrapper[4948]: E1122 04:48:43.757680 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:43 crc kubenswrapper[4948]: I1122 04:48:43.757724 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:43 crc kubenswrapper[4948]: E1122 04:48:43.758062 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:43 crc kubenswrapper[4948]: E1122 04:48:43.758215 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:43 crc kubenswrapper[4948]: I1122 04:48:43.758370 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:43 crc kubenswrapper[4948]: E1122 04:48:43.758574 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:43 crc kubenswrapper[4948]: I1122 04:48:43.758875 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500" Nov 22 04:48:44 crc kubenswrapper[4948]: I1122 04:48:44.637767 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/3.log" Nov 22 04:48:44 crc kubenswrapper[4948]: I1122 04:48:44.640282 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerStarted","Data":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"} Nov 22 04:48:44 crc kubenswrapper[4948]: I1122 04:48:44.640688 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:48:44 crc kubenswrapper[4948]: I1122 04:48:44.701295 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podStartSLOduration=105.701270925 podStartE2EDuration="1m45.701270925s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:48:44.679249254 +0000 UTC m=+127.365259770" watchObservedRunningTime="2025-11-22 04:48:44.701270925 +0000 UTC m=+127.387281451" Nov 22 04:48:44 crc kubenswrapper[4948]: I1122 04:48:44.701971 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-btkdx"] Nov 22 04:48:44 crc kubenswrapper[4948]: I1122 04:48:44.702086 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:44 crc kubenswrapper[4948]: E1122 04:48:44.702191 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:44 crc kubenswrapper[4948]: E1122 04:48:44.845417 4948 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 04:48:45 crc kubenswrapper[4948]: I1122 04:48:45.757594 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:45 crc kubenswrapper[4948]: I1122 04:48:45.757632 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:45 crc kubenswrapper[4948]: I1122 04:48:45.757594 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:45 crc kubenswrapper[4948]: E1122 04:48:45.757809 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:45 crc kubenswrapper[4948]: E1122 04:48:45.757997 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:45 crc kubenswrapper[4948]: E1122 04:48:45.758156 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:46 crc kubenswrapper[4948]: I1122 04:48:46.757199 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:46 crc kubenswrapper[4948]: E1122 04:48:46.757402 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:47 crc kubenswrapper[4948]: I1122 04:48:47.757380 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:47 crc kubenswrapper[4948]: I1122 04:48:47.757508 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:47 crc kubenswrapper[4948]: I1122 04:48:47.758097 4948 scope.go:117] "RemoveContainer" containerID="c08f2661298f84a686c7e96715dbd6ff2b0c66ee240f3cfa1cf6a0c71769a33f" Nov 22 04:48:47 crc kubenswrapper[4948]: I1122 04:48:47.757581 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:47 crc kubenswrapper[4948]: E1122 04:48:47.758197 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:47 crc kubenswrapper[4948]: E1122 04:48:47.757961 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:47 crc kubenswrapper[4948]: E1122 04:48:47.758355 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:48 crc kubenswrapper[4948]: I1122 04:48:48.661016 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/1.log" Nov 22 04:48:48 crc kubenswrapper[4948]: I1122 04:48:48.661082 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mw95l" event={"ID":"7a2e6333-2885-4eaf-a4b3-6613127e6375","Type":"ContainerStarted","Data":"65292ad5c1b26c893ba431368e9d2a9c6cf6a06c2be1de9ff0dad5f538be179f"} Nov 22 04:48:48 crc kubenswrapper[4948]: I1122 04:48:48.757801 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:48 crc kubenswrapper[4948]: E1122 04:48:48.757948 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-btkdx" podUID="9a35ebfd-12d4-4129-9c61-9d5880130fa0" Nov 22 04:48:49 crc kubenswrapper[4948]: I1122 04:48:49.757072 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:49 crc kubenswrapper[4948]: E1122 04:48:49.758939 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 04:48:49 crc kubenswrapper[4948]: I1122 04:48:49.759084 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:49 crc kubenswrapper[4948]: I1122 04:48:49.759109 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:49 crc kubenswrapper[4948]: E1122 04:48:49.759346 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 04:48:49 crc kubenswrapper[4948]: E1122 04:48:49.759449 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 04:48:50 crc kubenswrapper[4948]: I1122 04:48:50.757599 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:48:50 crc kubenswrapper[4948]: I1122 04:48:50.761097 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 22 04:48:50 crc kubenswrapper[4948]: I1122 04:48:50.761259 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 22 04:48:51 crc kubenswrapper[4948]: I1122 04:48:51.757596 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:48:51 crc kubenswrapper[4948]: I1122 04:48:51.757725 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:48:51 crc kubenswrapper[4948]: I1122 04:48:51.757868 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:48:51 crc kubenswrapper[4948]: I1122 04:48:51.760644 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 22 04:48:51 crc kubenswrapper[4948]: I1122 04:48:51.760644 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 22 04:48:51 crc kubenswrapper[4948]: I1122 04:48:51.760744 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 22 04:48:51 crc kubenswrapper[4948]: I1122 04:48:51.762235 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 22 04:48:53 crc kubenswrapper[4948]: I1122 04:48:53.449168 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.336458 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.399277 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-xkfvj"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.399831 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.400007 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.403869 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.404023 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.404312 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-mgqq5"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.404663 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-fznqf"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.404822 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.404910 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.405199 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.405221 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.405200 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.415685 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.415872 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416084 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416148 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416334 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416389 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416410 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416524 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416530 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416665 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416891 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.416913 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417046 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417095 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417182 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417203 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417565 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417646 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417785 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417651 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417926 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417677 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.417708 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418335 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418429 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418502 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418435 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418630 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418691 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418707 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418737 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418825 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418869 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418895 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.418955 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.419025 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.419058 4948 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.419032 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.419162 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.419286 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.421041 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.421258 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-w8qrc"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.422175 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.422773 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-lpvjc"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.423406 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-lpvjc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.429655 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.429929 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.439857 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-nfpm6"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.439987 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.440605 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.441327 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.441933 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.443347 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.444757 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.451231 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.458955 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.459076 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.459625 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.460008 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.460157 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.460165 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.460257 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.460699 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.460769 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.460806 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.460937 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.461060 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.461166 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.461704 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.461773 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 
04:48:59.461963 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.462519 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.463050 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.463217 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.463272 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ttcbt"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.464010 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.464651 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.470740 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.471377 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.472451 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-kmbfh"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.472955 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.475111 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.475709 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.476112 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.477032 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.477321 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.477441 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.477562 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.477633 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.477724 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.477767 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.477836 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.477866 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.478023 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.478327 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.478500 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.478667 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.478821 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.479703 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.479864 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.480100 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.481848 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-machine-api/machine-api-operator-5694c8668f-mgqq5"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.482008 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.482260 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.482484 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.483003 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.483872 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2vbhk"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.484365 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.484590 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.486823 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.489092 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v78bn"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.489775 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.490138 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-fznqf"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.492953 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.494392 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.494569 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.496404 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.496872 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.497169 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.497320 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.497454 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.499862 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.500502 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.501742 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.513204 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.514313 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.516076 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.516444 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.519360 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.520902 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.534662 4948 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.538179 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.539450 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.540177 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.542994 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-xkfvj"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.543735 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.547594 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.551688 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-kmbfh"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.551717 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t9v4g"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.552253 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.552475 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-qrscw"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.553006 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-qrscw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.554377 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.554929 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.555162 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-gld76"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.555596 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.556275 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.556837 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.557599 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vgmn6"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.558178 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.559293 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-n4hqv"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.559663 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.560869 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.561494 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.561634 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.561947 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.562486 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.562931 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.563946 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.564119 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.564611 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.565064 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-22sb9"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.565536 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-22sb9" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.566241 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bcq8w"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.566607 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.567424 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.568923 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.569053 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.570321 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.570545 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.571614 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-h8wxv"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.571845 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.572589 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.572818 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.573382 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-w8qrc"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.573507 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.574690 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.575249 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.575695 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.576582 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.576734 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-lpvjc"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.578002 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.581516 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.587160 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.587270 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.587767 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-nfpm6"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.587784 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.587865 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.588756 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-n4hqv"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.591439 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.594660 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.595414 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/166e874d-66a8-4f45-9290-43b3578139a6-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-z8rdw\" (UID: \"166e874d-66a8-4f45-9290-43b3578139a6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.595454 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.595983 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v88n\" (UniqueName: \"kubernetes.io/projected/ba391380-ff01-461f-b4e7-daa1f18d0198-kube-api-access-5v88n\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: 
\"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596019 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-auth-proxy-config\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596046 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/024d4cbd-d082-4074-a271-b01445f26510-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-f4mhc\" (UID: \"024d4cbd-d082-4074-a271-b01445f26510\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596073 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/656450bf-ce50-4fc8-863e-274359778f85-audit-dir\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596099 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-machine-approver-tls\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596131 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv2qs\" (UniqueName: \"kubernetes.io/projected/024d4cbd-d082-4074-a271-b01445f26510-kube-api-access-jv2qs\") pod \"openshift-apiserver-operator-796bbdcf4f-f4mhc\" (UID: \"024d4cbd-d082-4074-a271-b01445f26510\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596153 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-config\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596178 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-images\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596202 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-config\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: 
\"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596226 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-audit-policies\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596247 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1055fda6-8601-43b2-a82c-0dc3ddedc96a-config\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596271 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.596300 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598058 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/166e874d-66a8-4f45-9290-43b3578139a6-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-z8rdw\" (UID: \"166e874d-66a8-4f45-9290-43b3578139a6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598106 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/024d4cbd-d082-4074-a271-b01445f26510-config\") pod \"openshift-apiserver-operator-796bbdcf4f-f4mhc\" (UID: \"024d4cbd-d082-4074-a271-b01445f26510\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598132 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1055fda6-8601-43b2-a82c-0dc3ddedc96a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598163 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-config\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598187 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x54sv\" (UniqueName: \"kubernetes.io/projected/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-kube-api-access-x54sv\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598210 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e-serving-cert\") pod \"openshift-config-operator-7777fb866f-jmcp4\" (UID: \"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598234 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8c30c504-f3c7-4f34-878d-33fdbb884ffe-trusted-ca\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598258 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/005ccf51-a81d-4fa0-b9ac-468732a14edf-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598283 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/005ccf51-a81d-4fa0-b9ac-468732a14edf-audit-dir\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598310 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598332 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-jmcp4\" (UID: \"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598355 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598377 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598400 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q68pt\" (UniqueName: \"kubernetes.io/projected/fdc54e28-5b97-48c7-824a-3ed65296e98e-kube-api-access-q68pt\") pod \"dns-operator-744455d44c-w8qrc\" (UID: \"fdc54e28-5b97-48c7-824a-3ed65296e98e\") " pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598425 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/005ccf51-a81d-4fa0-b9ac-468732a14edf-serving-cert\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598447 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkzqs\" (UniqueName: \"kubernetes.io/projected/9a821dfa-73f0-4d83-b480-f566a1ce12fc-kube-api-access-pkzqs\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598490 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvdl2\" (UniqueName: \"kubernetes.io/projected/45a847ab-7976-4dbe-9ccc-5c89490b7c52-kube-api-access-qvdl2\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598519 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-service-ca\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598559 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/005ccf51-a81d-4fa0-b9ac-468732a14edf-encryption-config\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598579 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jm8hv\" (UniqueName: 
\"kubernetes.io/projected/c74bf710-a8fa-4d55-9f68-771f56c145f7-kube-api-access-jm8hv\") pod \"downloads-7954f5f757-lpvjc\" (UID: \"c74bf710-a8fa-4d55-9f68-771f56c145f7\") " pod="openshift-console/downloads-7954f5f757-lpvjc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598624 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1055fda6-8601-43b2-a82c-0dc3ddedc96a-service-ca-bundle\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598651 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598690 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598718 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ljsv\" (UniqueName: \"kubernetes.io/projected/005ccf51-a81d-4fa0-b9ac-468732a14edf-kube-api-access-9ljsv\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598742 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2crl\" (UniqueName: \"kubernetes.io/projected/8c30c504-f3c7-4f34-878d-33fdbb884ffe-kube-api-access-b2crl\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598783 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/ba391380-ff01-461f-b4e7-daa1f18d0198-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598803 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ba391380-ff01-461f-b4e7-daa1f18d0198-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598842 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqt45\" (UniqueName: \"kubernetes.io/projected/166e874d-66a8-4f45-9290-43b3578139a6-kube-api-access-lqt45\") pod \"openshift-controller-manager-operator-756b6f6bc6-z8rdw\" (UID: \"166e874d-66a8-4f45-9290-43b3578139a6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598894 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ce40099-332b-49b0-8eee-914df6a6a572-console-oauth-config\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.598916 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbhpr\" (UniqueName: \"kubernetes.io/projected/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-kube-api-access-pbhpr\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599004 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ba391380-ff01-461f-b4e7-daa1f18d0198-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599052 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599108 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599133 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-console-config\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599172 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-trusted-ca-bundle\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599214 4948 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbpcc\" (UniqueName: \"kubernetes.io/projected/0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e-kube-api-access-nbpcc\") pod \"openshift-config-operator-7777fb866f-jmcp4\" (UID: \"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599236 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/005ccf51-a81d-4fa0-b9ac-468732a14edf-audit-policies\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599271 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c30c504-f3c7-4f34-878d-33fdbb884ffe-config\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599295 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/005ccf51-a81d-4fa0-b9ac-468732a14edf-etcd-client\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599329 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c30c504-f3c7-4f34-878d-33fdbb884ffe-serving-cert\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599359 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45a847ab-7976-4dbe-9ccc-5c89490b7c52-serving-cert\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599385 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/fdc54e28-5b97-48c7-824a-3ed65296e98e-metrics-tls\") pod \"dns-operator-744455d44c-w8qrc\" (UID: \"fdc54e28-5b97-48c7-824a-3ed65296e98e\") " pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599419 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/005ccf51-a81d-4fa0-b9ac-468732a14edf-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599533 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599616 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mqbs\" (UniqueName: \"kubernetes.io/projected/656450bf-ce50-4fc8-863e-274359778f85-kube-api-access-9mqbs\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599688 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-client-ca\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599763 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-client-ca\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.599810 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.600010 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-oauth-serving-cert\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.600103 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ce40099-332b-49b0-8eee-914df6a6a572-console-serving-cert\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.600132 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7ztq\" (UniqueName: \"kubernetes.io/projected/3ce40099-332b-49b0-8eee-914df6a6a572-kube-api-access-p7ztq\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.600259 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1055fda6-8601-43b2-a82c-0dc3ddedc96a-serving-cert\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.600325 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ww787\" (UniqueName: \"kubernetes.io/projected/1055fda6-8601-43b2-a82c-0dc3ddedc96a-kube-api-access-ww787\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.600353 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.600379 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a821dfa-73f0-4d83-b480-f566a1ce12fc-serving-cert\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.600403 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-config\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.601539 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-gld76"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.601913 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.608276 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.609842 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-qrscw"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.610968 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.611685 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.612822 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bcq8w"] Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.613830 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6"] 
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.614895 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ttcbt"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.615910 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t9v4g"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.617049 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v78bn"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.618180 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2vbhk"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.619214 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-5wl5s"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.620239 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.620292 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.621490 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.622710 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.624010 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.625201 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.626282 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.627304 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.628357 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.630507 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vgmn6"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.630528 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-5wl5s"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.632582 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.632996 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.634308 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-ljtvf"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.634885 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ljtvf"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.635441 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ljtvf"]
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.642075 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.662770 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.702311 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkzqs\" (UniqueName: \"kubernetes.io/projected/9a821dfa-73f0-4d83-b480-f566a1ce12fc-kube-api-access-pkzqs\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.702531 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvdl2\" (UniqueName: \"kubernetes.io/projected/45a847ab-7976-4dbe-9ccc-5c89490b7c52-kube-api-access-qvdl2\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.702669 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-service-ca\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.702808 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/005ccf51-a81d-4fa0-b9ac-468732a14edf-encryption-config\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.702907 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jm8hv\" (UniqueName: \"kubernetes.io/projected/c74bf710-a8fa-4d55-9f68-771f56c145f7-kube-api-access-jm8hv\") pod \"downloads-7954f5f757-lpvjc\" (UID: \"c74bf710-a8fa-4d55-9f68-771f56c145f7\") " pod="openshift-console/downloads-7954f5f757-lpvjc"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.703071 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1055fda6-8601-43b2-a82c-0dc3ddedc96a-service-ca-bundle\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.703176 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.703314 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.703514 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ljsv\" (UniqueName: \"kubernetes.io/projected/005ccf51-a81d-4fa0-b9ac-468732a14edf-kube-api-access-9ljsv\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.703800 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2crl\" (UniqueName: \"kubernetes.io/projected/8c30c504-f3c7-4f34-878d-33fdbb884ffe-kube-api-access-b2crl\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.703892 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-service-ca\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.704590 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/ba391380-ff01-461f-b4e7-daa1f18d0198-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.704614 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ba391380-ff01-461f-b4e7-daa1f18d0198-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.704704 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqt45\" (UniqueName: \"kubernetes.io/projected/166e874d-66a8-4f45-9290-43b3578139a6-kube-api-access-lqt45\") pod \"openshift-controller-manager-operator-756b6f6bc6-z8rdw\" (UID: \"166e874d-66a8-4f45-9290-43b3578139a6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw"
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.704554 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1055fda6-8601-43b2-a82c-0dc3ddedc96a-service-ca-bundle\")
pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.704888 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ce40099-332b-49b0-8eee-914df6a6a572-console-oauth-config\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.704928 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbhpr\" (UniqueName: \"kubernetes.io/projected/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-kube-api-access-pbhpr\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.704895 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.704961 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ba391380-ff01-461f-b4e7-daa1f18d0198-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705117 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705148 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705172 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-console-config\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705194 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-trusted-ca-bundle\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" 
Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705217 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbpcc\" (UniqueName: \"kubernetes.io/projected/0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e-kube-api-access-nbpcc\") pod \"openshift-config-operator-7777fb866f-jmcp4\" (UID: \"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705242 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/005ccf51-a81d-4fa0-b9ac-468732a14edf-audit-policies\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705267 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c30c504-f3c7-4f34-878d-33fdbb884ffe-config\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705288 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/005ccf51-a81d-4fa0-b9ac-468732a14edf-etcd-client\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705309 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c30c504-f3c7-4f34-878d-33fdbb884ffe-serving-cert\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705336 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45a847ab-7976-4dbe-9ccc-5c89490b7c52-serving-cert\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705358 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/fdc54e28-5b97-48c7-824a-3ed65296e98e-metrics-tls\") pod \"dns-operator-744455d44c-w8qrc\" (UID: \"fdc54e28-5b97-48c7-824a-3ed65296e98e\") " pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705380 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/005ccf51-a81d-4fa0-b9ac-468732a14edf-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705407 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705442 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mqbs\" (UniqueName: \"kubernetes.io/projected/656450bf-ce50-4fc8-863e-274359778f85-kube-api-access-9mqbs\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705493 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-client-ca\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705517 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-client-ca\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705541 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705577 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-oauth-serving-cert\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705609 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ce40099-332b-49b0-8eee-914df6a6a572-console-serving-cert\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705631 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7ztq\" (UniqueName: \"kubernetes.io/projected/3ce40099-332b-49b0-8eee-914df6a6a572-kube-api-access-p7ztq\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705654 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1055fda6-8601-43b2-a82c-0dc3ddedc96a-serving-cert\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705675 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ww787\" (UniqueName: \"kubernetes.io/projected/1055fda6-8601-43b2-a82c-0dc3ddedc96a-kube-api-access-ww787\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705730 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705762 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a821dfa-73f0-4d83-b480-f566a1ce12fc-serving-cert\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705790 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-config\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705819 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ba391380-ff01-461f-b4e7-daa1f18d0198-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705814 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/166e874d-66a8-4f45-9290-43b3578139a6-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-z8rdw\" (UID: \"166e874d-66a8-4f45-9290-43b3578139a6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705874 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705900 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5v88n\" (UniqueName: \"kubernetes.io/projected/ba391380-ff01-461f-b4e7-daa1f18d0198-kube-api-access-5v88n\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705918 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-auth-proxy-config\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705939 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/024d4cbd-d082-4074-a271-b01445f26510-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-f4mhc\" (UID: \"024d4cbd-d082-4074-a271-b01445f26510\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705961 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/656450bf-ce50-4fc8-863e-274359778f85-audit-dir\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.705986 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-machine-approver-tls\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706012 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv2qs\" (UniqueName: \"kubernetes.io/projected/024d4cbd-d082-4074-a271-b01445f26510-kube-api-access-jv2qs\") pod \"openshift-apiserver-operator-796bbdcf4f-f4mhc\" (UID: \"024d4cbd-d082-4074-a271-b01445f26510\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706037 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-config\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706060 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-images\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706082 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-config\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706104 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-audit-policies\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706127 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1055fda6-8601-43b2-a82c-0dc3ddedc96a-config\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706152 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706181 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706209 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/166e874d-66a8-4f45-9290-43b3578139a6-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-z8rdw\" (UID: \"166e874d-66a8-4f45-9290-43b3578139a6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706234 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/024d4cbd-d082-4074-a271-b01445f26510-config\") pod \"openshift-apiserver-operator-796bbdcf4f-f4mhc\" (UID: \"024d4cbd-d082-4074-a271-b01445f26510\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706260 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1055fda6-8601-43b2-a82c-0dc3ddedc96a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706284 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-config\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706306 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-x54sv\" (UniqueName: \"kubernetes.io/projected/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-kube-api-access-x54sv\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706326 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/005ccf51-a81d-4fa0-b9ac-468732a14edf-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706330 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e-serving-cert\") pod \"openshift-config-operator-7777fb866f-jmcp4\" (UID: \"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706403 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8c30c504-f3c7-4f34-878d-33fdbb884ffe-trusted-ca\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706431 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/005ccf51-a81d-4fa0-b9ac-468732a14edf-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706455 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/005ccf51-a81d-4fa0-b9ac-468732a14edf-audit-dir\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706534 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706569 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-jmcp4\" (UID: \"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706596 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: 
\"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706621 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706653 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q68pt\" (UniqueName: \"kubernetes.io/projected/fdc54e28-5b97-48c7-824a-3ed65296e98e-kube-api-access-q68pt\") pod \"dns-operator-744455d44c-w8qrc\" (UID: \"fdc54e28-5b97-48c7-824a-3ed65296e98e\") " pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706665 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/166e874d-66a8-4f45-9290-43b3578139a6-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-z8rdw\" (UID: \"166e874d-66a8-4f45-9290-43b3578139a6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.706675 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/005ccf51-a81d-4fa0-b9ac-468732a14edf-serving-cert\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.707637 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-config\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.708259 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1055fda6-8601-43b2-a82c-0dc3ddedc96a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.708630 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-config\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.708737 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-client-ca\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.708908 4948 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.709083 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/024d4cbd-d082-4074-a271-b01445f26510-config\") pod \"openshift-apiserver-operator-796bbdcf4f-f4mhc\" (UID: \"024d4cbd-d082-4074-a271-b01445f26510\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.709415 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-client-ca\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.709429 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-auth-proxy-config\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.709528 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1055fda6-8601-43b2-a82c-0dc3ddedc96a-config\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.709683 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/ba391380-ff01-461f-b4e7-daa1f18d0198-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.709933 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.710068 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.710150 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/005ccf51-a81d-4fa0-b9ac-468732a14edf-encryption-config\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.710270 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/656450bf-ce50-4fc8-863e-274359778f85-audit-dir\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.710375 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-audit-policies\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.711816 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.711825 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/005ccf51-a81d-4fa0-b9ac-468732a14edf-audit-policies\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.711987 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-oauth-serving-cert\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.712052 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/166e874d-66a8-4f45-9290-43b3578139a6-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-z8rdw\" (UID: \"166e874d-66a8-4f45-9290-43b3578139a6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.712194 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-config\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.712230 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-console-config\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.712281 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/005ccf51-a81d-4fa0-b9ac-468732a14edf-audit-dir\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.712321 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.712711 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-images\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.712500 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/005ccf51-a81d-4fa0-b9ac-468732a14edf-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.712452 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8c30c504-f3c7-4f34-878d-33fdbb884ffe-trusted-ca\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.712880 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-jmcp4\" (UID: \"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.713318 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/024d4cbd-d082-4074-a271-b01445f26510-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-f4mhc\" (UID: \"024d4cbd-d082-4074-a271-b01445f26510\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.713533 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ce40099-332b-49b0-8eee-914df6a6a572-trusted-ca-bundle\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.713604 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45a847ab-7976-4dbe-9ccc-5c89490b7c52-serving-cert\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.713970 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.714426 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-config\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.714653 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-machine-approver-tls\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.714744 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ce40099-332b-49b0-8eee-914df6a6a572-console-oauth-config\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.714761 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.714914 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/fdc54e28-5b97-48c7-824a-3ed65296e98e-metrics-tls\") pod \"dns-operator-744455d44c-w8qrc\" (UID: \"fdc54e28-5b97-48c7-824a-3ed65296e98e\") " pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.715262 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/005ccf51-a81d-4fa0-b9ac-468732a14edf-etcd-client\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.715582 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ce40099-332b-49b0-8eee-914df6a6a572-console-serving-cert\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.715734 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.715932 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/005ccf51-a81d-4fa0-b9ac-468732a14edf-serving-cert\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.715940 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e-serving-cert\") pod \"openshift-config-operator-7777fb866f-jmcp4\" (UID: \"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.716156 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.716769 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.717775 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.717861 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1055fda6-8601-43b2-a82c-0dc3ddedc96a-serving-cert\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.718053 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.719095 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a821dfa-73f0-4d83-b480-f566a1ce12fc-serving-cert\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.722256 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.741915 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.762004 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.788096 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.801621 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.821898 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.841654 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.871862 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.882187 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.902532 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.921677 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.959698 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.973068 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 22 04:48:59 crc kubenswrapper[4948]: I1122 04:48:59.982297 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.003720 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.022208 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.042401 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.063026 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.082941 4948 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.102817 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.122872 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.128196 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c30c504-f3c7-4f34-878d-33fdbb884ffe-config\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.128376 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c30c504-f3c7-4f34-878d-33fdbb884ffe-serving-cert\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.142871 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.162583 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.183556 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.203326 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.222894 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.242393 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.263658 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.282936 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.304155 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.323410 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.343120 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.363083 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.382833 4948 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.403546 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.422730 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.442330 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.463617 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.483401 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.503656 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.522494 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.543383 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.563417 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.580964 4948 request.go:700] Waited for 1.01612737s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/secrets?fieldSelector=metadata.name%3Dkube-storage-version-migrator-operator-dockercfg-2bh8d&limit=500&resourceVersion=0 Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.584825 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.603852 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.622904 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.643544 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.663846 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.683028 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 22 04:49:00 crc 
kubenswrapper[4948]: I1122 04:49:00.703384 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.722694 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.754790 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.763922 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.787460 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.802487 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.824232 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.843537 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.863955 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.884728 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.902600 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.923506 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.943659 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.962645 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 22 04:49:00 crc kubenswrapper[4948]: I1122 04:49:00.982905 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.002565 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.023175 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.043367 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.063226 4948 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"service-ca-bundle" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.084119 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.103527 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.123683 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.142988 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.162373 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.182949 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.202900 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.222499 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.242918 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.262452 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.284359 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.303005 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.322312 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.342601 4948 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.363074 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.381970 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.401936 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.423251 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 22 04:49:01 crc 
kubenswrapper[4948]: I1122 04:49:01.443739 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.509186 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkzqs\" (UniqueName: \"kubernetes.io/projected/9a821dfa-73f0-4d83-b480-f566a1ce12fc-kube-api-access-pkzqs\") pod \"controller-manager-879f6c89f-fznqf\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.526166 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvdl2\" (UniqueName: \"kubernetes.io/projected/45a847ab-7976-4dbe-9ccc-5c89490b7c52-kube-api-access-qvdl2\") pod \"route-controller-manager-6576b87f9c-l7pw8\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530156 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d2caf26-e32d-412f-b764-a050f5a5840c-ca-trust-extracted\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530209 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-certificates\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530233 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1682ca74-e62f-492c-8de6-fa0fc27c5b39-etcd-client\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530271 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kzvh\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-kube-api-access-6kzvh\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530310 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1682ca74-e62f-492c-8de6-fa0fc27c5b39-serving-cert\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530352 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1682ca74-e62f-492c-8de6-fa0fc27c5b39-encryption-config\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " 
pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530389 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530411 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-tls\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530431 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1682ca74-e62f-492c-8de6-fa0fc27c5b39-node-pullsecrets\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530498 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1682ca74-e62f-492c-8de6-fa0fc27c5b39-audit-dir\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530566 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-trusted-ca-bundle\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530590 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8f7g\" (UniqueName: \"kubernetes.io/projected/1682ca74-e62f-492c-8de6-fa0fc27c5b39-kube-api-access-n8f7g\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530615 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-etcd-serving-ca\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530647 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-trusted-ca\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530667 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-config\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530692 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-audit\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530734 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d2caf26-e32d-412f-b764-a050f5a5840c-installation-pull-secrets\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530758 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-image-import-ca\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530782 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-bound-sa-token\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530805 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lhld\" (UniqueName: \"kubernetes.io/projected/104266b0-fe3d-4831-8853-e0f964dee743-kube-api-access-2lhld\") pod \"cluster-samples-operator-665b6dd947-5tvbr\" (UID: \"104266b0-fe3d-4831-8853-e0f964dee743\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.530838 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/104266b0-fe3d-4831-8853-e0f964dee743-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5tvbr\" (UID: \"104266b0-fe3d-4831-8853-e0f964dee743\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" Nov 22 04:49:01 crc kubenswrapper[4948]: E1122 04:49:01.531135 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.031120548 +0000 UTC m=+144.717131294 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.539693 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2crl\" (UniqueName: \"kubernetes.io/projected/8c30c504-f3c7-4f34-878d-33fdbb884ffe-kube-api-access-b2crl\") pod \"console-operator-58897d9998-nfpm6\" (UID: \"8c30c504-f3c7-4f34-878d-33fdbb884ffe\") " pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.569835 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ljsv\" (UniqueName: \"kubernetes.io/projected/005ccf51-a81d-4fa0-b9ac-468732a14edf-kube-api-access-9ljsv\") pod \"apiserver-7bbb656c7d-xlnzv\" (UID: \"005ccf51-a81d-4fa0-b9ac-468732a14edf\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.579834 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.601164 4948 request.go:700] Waited for 1.896186963s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/serviceaccounts/openshift-controller-manager-operator/token Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.602428 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.602620 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jm8hv\" (UniqueName: \"kubernetes.io/projected/c74bf710-a8fa-4d55-9f68-771f56c145f7-kube-api-access-jm8hv\") pod \"downloads-7954f5f757-lpvjc\" (UID: \"c74bf710-a8fa-4d55-9f68-771f56c145f7\") " pod="openshift-console/downloads-7954f5f757-lpvjc" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.621530 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqt45\" (UniqueName: \"kubernetes.io/projected/166e874d-66a8-4f45-9290-43b3578139a6-kube-api-access-lqt45\") pod \"openshift-controller-manager-operator-756b6f6bc6-z8rdw\" (UID: \"166e874d-66a8-4f45-9290-43b3578139a6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.631884 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:01 crc kubenswrapper[4948]: E1122 04:49:01.632059 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.132032093 +0000 UTC m=+144.818042609 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.632102 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-etcd-serving-ca\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.632135 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c7226f25-d408-493a-a5bd-634480a933ad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zjh6q\" (UID: \"c7226f25-d408-493a-a5bd-634480a933ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.632214 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-csi-data-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 
04:49:01.632233 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f69db977-2a9e-4f6b-8bf9-1abfd73b373f-config-volume\") pod \"dns-default-qrscw\" (UID: \"f69db977-2a9e-4f6b-8bf9-1abfd73b373f\") " pod="openshift-dns/dns-default-qrscw" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.632531 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcn6l\" (UniqueName: \"kubernetes.io/projected/19e41764-1439-4a74-a6ce-ffacb3448577-kube-api-access-zcn6l\") pod \"migrator-59844c95c7-78blh\" (UID: \"19e41764-1439-4a74-a6ce-ffacb3448577\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.632749 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-etcd-serving-ca\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.632551 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/ae58b080-edff-4582-839f-fc67d5b0b981-default-certificate\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.632946 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-audit\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.633449 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-audit\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.633589 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/8c6c1aea-8440-4112-ac10-b638520ec37f-etcd-service-ca\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.633717 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bbss\" (UniqueName: \"kubernetes.io/projected/aaf64a47-9260-4ab8-83da-238e80d4965b-kube-api-access-6bbss\") pod \"marketplace-operator-79b997595-bcq8w\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") " pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.633865 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d2caf26-e32d-412f-b764-a050f5a5840c-installation-pull-secrets\") pod \"image-registry-697d97f7c8-v78bn\" (UID: 
\"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.633946 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lhld\" (UniqueName: \"kubernetes.io/projected/104266b0-fe3d-4831-8853-e0f964dee743-kube-api-access-2lhld\") pod \"cluster-samples-operator-665b6dd947-5tvbr\" (UID: \"104266b0-fe3d-4831-8853-e0f964dee743\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.633987 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f0c31760-2506-4e99-b6ee-b072b77d60f7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-c6tw5\" (UID: \"f0c31760-2506-4e99-b6ee-b072b77d60f7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.634063 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-secret-volume\") pod \"collect-profiles-29396445-9b9j9\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.634159 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/104266b0-fe3d-4831-8853-e0f964dee743-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5tvbr\" (UID: \"104266b0-fe3d-4831-8853-e0f964dee743\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.634258 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/17cb3856-2d7a-49ba-8488-1b15832b26a3-trusted-ca\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.634349 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/eff606a7-5cf1-461f-a63f-225dc013ef4b-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4hf57\" (UID: \"eff606a7-5cf1-461f-a63f-225dc013ef4b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.634604 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1682ca74-e62f-492c-8de6-fa0fc27c5b39-etcd-client\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.634682 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kzvh\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-kube-api-access-6kzvh\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.634740 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5scfj\" (UniqueName: \"kubernetes.io/projected/33fef7f7-8e0c-475b-9e5b-8b3eab7b609c-kube-api-access-5scfj\") pod \"kube-storage-version-migrator-operator-b67b599dd-6qmls\" (UID: \"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.634798 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-mountpoint-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.634924 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/00104340-651c-488e-ae76-53e040f58218-proxy-tls\") pod \"machine-config-controller-84d6567774-w7xl7\" (UID: \"00104340-651c-488e-ae76-53e040f58218\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635050 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1682ca74-e62f-492c-8de6-fa0fc27c5b39-serving-cert\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635083 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bcq8w\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") " pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635103 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnwj2\" (UniqueName: \"kubernetes.io/projected/be4dab2f-0e4f-464f-8a71-5fe7baa561a5-kube-api-access-gnwj2\") pod \"service-ca-operator-777779d784-6rhjl\" (UID: \"be4dab2f-0e4f-464f-8a71-5fe7baa561a5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635131 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1682ca74-e62f-492c-8de6-fa0fc27c5b39-encryption-config\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635149 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zcg4\" (UniqueName: \"kubernetes.io/projected/09733b67-9323-460c-ab8e-e55fbaf31542-kube-api-access-4zcg4\") pod \"control-plane-machine-set-operator-78cbb6b69f-h4z45\" (UID: \"09733b67-9323-460c-ab8e-e55fbaf31542\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635167 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1682ca74-e62f-492c-8de6-fa0fc27c5b39-node-pullsecrets\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635183 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/09733b67-9323-460c-ab8e-e55fbaf31542-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-h4z45\" (UID: \"09733b67-9323-460c-ab8e-e55fbaf31542\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635203 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a31ec4c4-1be7-4631-9611-3886eecd09fb-tmpfs\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635226 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1682ca74-e62f-492c-8de6-fa0fc27c5b39-node-pullsecrets\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635236 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c6c1aea-8440-4112-ac10-b638520ec37f-config\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635254 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0c31760-2506-4e99-b6ee-b072b77d60f7-config\") pod \"kube-apiserver-operator-766d6c64bb-c6tw5\" (UID: \"f0c31760-2506-4e99-b6ee-b072b77d60f7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635291 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bcq8w\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") " pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635313 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/17cb3856-2d7a-49ba-8488-1b15832b26a3-metrics-tls\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:49:01 crc 
kubenswrapper[4948]: I1122 04:49:01.635331 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae58b080-edff-4582-839f-fc67d5b0b981-service-ca-bundle\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635348 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqz78\" (UniqueName: \"kubernetes.io/projected/ae58b080-edff-4582-839f-fc67d5b0b981-kube-api-access-kqz78\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635365 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1682ca74-e62f-492c-8de6-fa0fc27c5b39-audit-dir\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635380 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2e426879-bc45-4bad-8353-fe9be602ffb2-srv-cert\") pod \"catalog-operator-68c6474976-s4wrk\" (UID: \"2e426879-bc45-4bad-8353-fe9be602ffb2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635395 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmt89\" (UniqueName: \"kubernetes.io/projected/eff606a7-5cf1-461f-a63f-225dc013ef4b-kube-api-access-vmt89\") pod \"olm-operator-6b444d44fb-4hf57\" (UID: \"eff606a7-5cf1-461f-a63f-225dc013ef4b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635420 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c09cae21-e7d6-4726-8cc3-44d49ddcf202-config\") pod \"kube-controller-manager-operator-78b949d7b-7czmn\" (UID: \"c09cae21-e7d6-4726-8cc3-44d49ddcf202\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635489 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnjvl\" (UniqueName: \"kubernetes.io/projected/17cb3856-2d7a-49ba-8488-1b15832b26a3-kube-api-access-mnjvl\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635507 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7226f25-d408-493a-a5bd-634480a933ad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zjh6q\" (UID: \"c7226f25-d408-493a-a5bd-634480a933ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 
04:49:01.635521 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-config-volume\") pod \"collect-profiles-29396445-9b9j9\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635535 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/dd9b5929-4c7a-4598-922b-a5a37c9f4443-node-bootstrap-token\") pod \"machine-config-server-22sb9\" (UID: \"dd9b5929-4c7a-4598-922b-a5a37c9f4443\") " pod="openshift-machine-config-operator/machine-config-server-22sb9" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635551 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k2wp\" (UniqueName: \"kubernetes.io/projected/6bd32efc-1948-4c04-bd33-eafa2ea7417c-kube-api-access-7k2wp\") pod \"package-server-manager-789f6589d5-h6gh5\" (UID: \"6bd32efc-1948-4c04-bd33-eafa2ea7417c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635567 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8f7g\" (UniqueName: \"kubernetes.io/projected/1682ca74-e62f-492c-8de6-fa0fc27c5b39-kube-api-access-n8f7g\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635583 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbd8x\" (UniqueName: \"kubernetes.io/projected/89163a77-fa00-44c4-aa83-969d24886d65-kube-api-access-gbd8x\") pod \"service-ca-9c57cc56f-vgmn6\" (UID: \"89163a77-fa00-44c4-aa83-969d24886d65\") " pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635600 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f863054-d2f5-4952-ab56-026b3a2bf341-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t9v4g\" (UID: \"9f863054-d2f5-4952-ab56-026b3a2bf341\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635717 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635801 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/17cb3856-2d7a-49ba-8488-1b15832b26a3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635819 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph7kf\" (UniqueName: \"kubernetes.io/projected/a31ec4c4-1be7-4631-9611-3886eecd09fb-kube-api-access-ph7kf\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635834 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/eff606a7-5cf1-461f-a63f-225dc013ef4b-srv-cert\") pod \"olm-operator-6b444d44fb-4hf57\" (UID: \"eff606a7-5cf1-461f-a63f-225dc013ef4b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635861 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-trusted-ca\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635877 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-config\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.635988 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c6c1aea-8440-4112-ac10-b638520ec37f-serving-cert\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.636009 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c09cae21-e7d6-4726-8cc3-44d49ddcf202-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-7czmn\" (UID: \"c09cae21-e7d6-4726-8cc3-44d49ddcf202\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.636029 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be4dab2f-0e4f-464f-8a71-5fe7baa561a5-config\") pod \"service-ca-operator-777779d784-6rhjl\" (UID: \"be4dab2f-0e4f-464f-8a71-5fe7baa561a5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" Nov 22 04:49:01 crc 
kubenswrapper[4948]: I1122 04:49:01.636069 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-auth-proxy-config\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.636097 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8c6c1aea-8440-4112-ac10-b638520ec37f-etcd-client\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.636111 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/6bd32efc-1948-4c04-bd33-eafa2ea7417c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h6gh5\" (UID: \"6bd32efc-1948-4c04-bd33-eafa2ea7417c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.636131 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbsgr\" (UniqueName: \"kubernetes.io/projected/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-kube-api-access-fbsgr\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.636409 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1682ca74-e62f-492c-8de6-fa0fc27c5b39-audit-dir\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.637249 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-config\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.637762 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1682ca74-e62f-492c-8de6-fa0fc27c5b39-etcd-client\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.638078 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-image-import-ca\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.638119 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9qrc\" (UniqueName: 
\"kubernetes.io/projected/2e426879-bc45-4bad-8353-fe9be602ffb2-kube-api-access-l9qrc\") pod \"catalog-operator-68c6474976-s4wrk\" (UID: \"2e426879-bc45-4bad-8353-fe9be602ffb2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.638142 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33fef7f7-8e0c-475b-9e5b-8b3eab7b609c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-6qmls\" (UID: \"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.638163 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vdqw\" (UniqueName: \"kubernetes.io/projected/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-kube-api-access-6vdqw\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.638410 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-bound-sa-token\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.638452 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87tfl\" (UniqueName: \"kubernetes.io/projected/dd9b5929-4c7a-4598-922b-a5a37c9f4443-kube-api-access-87tfl\") pod \"machine-config-server-22sb9\" (UID: \"dd9b5929-4c7a-4598-922b-a5a37c9f4443\") " pod="openshift-machine-config-operator/machine-config-server-22sb9" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639070 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d2caf26-e32d-412f-b764-a050f5a5840c-ca-trust-extracted\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639118 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4s2mz\" (UniqueName: \"kubernetes.io/projected/00104340-651c-488e-ae76-53e040f58218-kube-api-access-4s2mz\") pod \"machine-config-controller-84d6567774-w7xl7\" (UID: \"00104340-651c-488e-ae76-53e040f58218\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639137 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-registration-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639172 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-certificates\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639207 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/8c6c1aea-8440-4112-ac10-b638520ec37f-etcd-ca\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639304 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4n29\" (UniqueName: \"kubernetes.io/projected/c7880b3d-8bca-4ad5-a645-0d043cc71538-kube-api-access-l4n29\") pod \"ingress-canary-ljtvf\" (UID: \"c7880b3d-8bca-4ad5-a645-0d043cc71538\") " pod="openshift-ingress-canary/ingress-canary-ljtvf" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639329 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qnmz\" (UniqueName: \"kubernetes.io/projected/f69db977-2a9e-4f6b-8bf9-1abfd73b373f-kube-api-access-9qnmz\") pod \"dns-default-qrscw\" (UID: \"f69db977-2a9e-4f6b-8bf9-1abfd73b373f\") " pod="openshift-dns/dns-default-qrscw" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639346 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2e426879-bc45-4bad-8353-fe9be602ffb2-profile-collector-cert\") pod \"catalog-operator-68c6474976-s4wrk\" (UID: \"2e426879-bc45-4bad-8353-fe9be602ffb2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639366 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/ae58b080-edff-4582-839f-fc67d5b0b981-stats-auth\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639387 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-plugins-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.639678 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-trusted-ca\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.640025 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1682ca74-e62f-492c-8de6-fa0fc27c5b39-serving-cert\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc 
kubenswrapper[4948]: I1122 04:49:01.640172 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d2caf26-e32d-412f-b764-a050f5a5840c-ca-trust-extracted\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.640365 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-socket-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.640615 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-certificates\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.640691 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f69db977-2a9e-4f6b-8bf9-1abfd73b373f-metrics-tls\") pod \"dns-default-qrscw\" (UID: \"f69db977-2a9e-4f6b-8bf9-1abfd73b373f\") " pod="openshift-dns/dns-default-qrscw" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.640840 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33fef7f7-8e0c-475b-9e5b-8b3eab7b609c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-6qmls\" (UID: \"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.640944 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0c31760-2506-4e99-b6ee-b072b77d60f7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-c6tw5\" (UID: \"f0c31760-2506-4e99-b6ee-b072b77d60f7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641027 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c7880b3d-8bca-4ad5-a645-0d043cc71538-cert\") pod \"ingress-canary-ljtvf\" (UID: \"c7880b3d-8bca-4ad5-a645-0d043cc71538\") " pod="openshift-ingress-canary/ingress-canary-ljtvf" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641071 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dzm9\" (UniqueName: \"kubernetes.io/projected/8c6c1aea-8440-4112-ac10-b638520ec37f-kube-api-access-2dzm9\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641090 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/be4dab2f-0e4f-464f-8a71-5fe7baa561a5-serving-cert\") pod \"service-ca-operator-777779d784-6rhjl\" (UID: \"be4dab2f-0e4f-464f-8a71-5fe7baa561a5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641123 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641142 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-tls\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641137 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-image-import-ca\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641350 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a31ec4c4-1be7-4631-9611-3886eecd09fb-apiservice-cert\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641538 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ae58b080-edff-4582-839f-fc67d5b0b981-metrics-certs\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: E1122 04:49:01.641551 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.141540041 +0000 UTC m=+144.827550557 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641623 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/dd9b5929-4c7a-4598-922b-a5a37c9f4443-certs\") pod \"machine-config-server-22sb9\" (UID: \"dd9b5929-4c7a-4598-922b-a5a37c9f4443\") " pod="openshift-machine-config-operator/machine-config-server-22sb9" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641651 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/89163a77-fa00-44c4-aa83-969d24886d65-signing-key\") pod \"service-ca-9c57cc56f-vgmn6\" (UID: \"89163a77-fa00-44c4-aa83-969d24886d65\") " pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641669 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-proxy-tls\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641692 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/89163a77-fa00-44c4-aa83-969d24886d65-signing-cabundle\") pod \"service-ca-9c57cc56f-vgmn6\" (UID: \"89163a77-fa00-44c4-aa83-969d24886d65\") " pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641708 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-images\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641724 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjs2t\" (UniqueName: \"kubernetes.io/projected/9f863054-d2f5-4952-ab56-026b3a2bf341-kube-api-access-qjs2t\") pod \"multus-admission-controller-857f4d67dd-t9v4g\" (UID: \"9f863054-d2f5-4952-ab56-026b3a2bf341\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641752 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksgqs\" (UniqueName: \"kubernetes.io/projected/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-kube-api-access-ksgqs\") pod \"collect-profiles-29396445-9b9j9\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641806 
4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-trusted-ca-bundle\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.641949 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a31ec4c4-1be7-4631-9611-3886eecd09fb-webhook-cert\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.642015 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7226f25-d408-493a-a5bd-634480a933ad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zjh6q\" (UID: \"c7226f25-d408-493a-a5bd-634480a933ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.642084 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/00104340-651c-488e-ae76-53e040f58218-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-w7xl7\" (UID: \"00104340-651c-488e-ae76-53e040f58218\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.642143 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c09cae21-e7d6-4726-8cc3-44d49ddcf202-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-7czmn\" (UID: \"c09cae21-e7d6-4726-8cc3-44d49ddcf202\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.643709 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1682ca74-e62f-492c-8de6-fa0fc27c5b39-trusted-ca-bundle\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.643893 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1682ca74-e62f-492c-8de6-fa0fc27c5b39-encryption-config\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.644704 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d2caf26-e32d-412f-b764-a050f5a5840c-installation-pull-secrets\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.645095 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" 
(UniqueName: \"kubernetes.io/projected/ba391380-ff01-461f-b4e7-daa1f18d0198-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.648157 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/104266b0-fe3d-4831-8853-e0f964dee743-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-5tvbr\" (UID: \"104266b0-fe3d-4831-8853-e0f964dee743\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.652122 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-tls\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.656750 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbhpr\" (UniqueName: \"kubernetes.io/projected/b5f1f7f2-3a3e-464d-84f7-69e726b785a7-kube-api-access-pbhpr\") pod \"machine-api-operator-5694c8668f-mgqq5\" (UID: \"b5f1f7f2-3a3e-464d-84f7-69e726b785a7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.678896 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-lpvjc" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.690505 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbpcc\" (UniqueName: \"kubernetes.io/projected/0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e-kube-api-access-nbpcc\") pod \"openshift-config-operator-7777fb866f-jmcp4\" (UID: \"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.699302 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mqbs\" (UniqueName: \"kubernetes.io/projected/656450bf-ce50-4fc8-863e-274359778f85-kube-api-access-9mqbs\") pod \"oauth-openshift-558db77b4-2vbhk\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.708925 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.716458 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.717232 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x54sv\" (UniqueName: \"kubernetes.io/projected/714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0-kube-api-access-x54sv\") pod \"machine-approver-56656f9798-qg6ht\" (UID: \"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.742679 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v88n\" (UniqueName: \"kubernetes.io/projected/ba391380-ff01-461f-b4e7-daa1f18d0198-kube-api-access-5v88n\") pod \"cluster-image-registry-operator-dc59b4c8b-b5v6z\" (UID: \"ba391380-ff01-461f-b4e7-daa1f18d0198\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743503 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743704 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c7226f25-d408-493a-a5bd-634480a933ad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zjh6q\" (UID: \"c7226f25-d408-493a-a5bd-634480a933ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" Nov 22 04:49:01 crc kubenswrapper[4948]: E1122 04:49:01.743760 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.243736311 +0000 UTC m=+144.929746847 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743802 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-csi-data-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743831 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f69db977-2a9e-4f6b-8bf9-1abfd73b373f-config-volume\") pod \"dns-default-qrscw\" (UID: \"f69db977-2a9e-4f6b-8bf9-1abfd73b373f\") " pod="openshift-dns/dns-default-qrscw" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743855 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcn6l\" (UniqueName: \"kubernetes.io/projected/19e41764-1439-4a74-a6ce-ffacb3448577-kube-api-access-zcn6l\") pod \"migrator-59844c95c7-78blh\" (UID: \"19e41764-1439-4a74-a6ce-ffacb3448577\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743882 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/ae58b080-edff-4582-839f-fc67d5b0b981-default-certificate\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743906 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/8c6c1aea-8440-4112-ac10-b638520ec37f-etcd-service-ca\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743928 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bbss\" (UniqueName: \"kubernetes.io/projected/aaf64a47-9260-4ab8-83da-238e80d4965b-kube-api-access-6bbss\") pod \"marketplace-operator-79b997595-bcq8w\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") " pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743961 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f0c31760-2506-4e99-b6ee-b072b77d60f7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-c6tw5\" (UID: \"f0c31760-2506-4e99-b6ee-b072b77d60f7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.743982 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-secret-volume\") pod \"collect-profiles-29396445-9b9j9\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744003 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/17cb3856-2d7a-49ba-8488-1b15832b26a3-trusted-ca\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744051 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/eff606a7-5cf1-461f-a63f-225dc013ef4b-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4hf57\" (UID: \"eff606a7-5cf1-461f-a63f-225dc013ef4b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744085 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5scfj\" (UniqueName: \"kubernetes.io/projected/33fef7f7-8e0c-475b-9e5b-8b3eab7b609c-kube-api-access-5scfj\") pod \"kube-storage-version-migrator-operator-b67b599dd-6qmls\" (UID: \"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744109 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-mountpoint-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744141 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/00104340-651c-488e-ae76-53e040f58218-proxy-tls\") pod \"machine-config-controller-84d6567774-w7xl7\" (UID: \"00104340-651c-488e-ae76-53e040f58218\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744169 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bcq8w\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") " pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744195 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnwj2\" (UniqueName: \"kubernetes.io/projected/be4dab2f-0e4f-464f-8a71-5fe7baa561a5-kube-api-access-gnwj2\") pod \"service-ca-operator-777779d784-6rhjl\" (UID: \"be4dab2f-0e4f-464f-8a71-5fe7baa561a5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744219 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zcg4\" (UniqueName: \"kubernetes.io/projected/09733b67-9323-460c-ab8e-e55fbaf31542-kube-api-access-4zcg4\") pod 
\"control-plane-machine-set-operator-78cbb6b69f-h4z45\" (UID: \"09733b67-9323-460c-ab8e-e55fbaf31542\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744243 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a31ec4c4-1be7-4631-9611-3886eecd09fb-tmpfs\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744282 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/09733b67-9323-460c-ab8e-e55fbaf31542-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-h4z45\" (UID: \"09733b67-9323-460c-ab8e-e55fbaf31542\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744316 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c6c1aea-8440-4112-ac10-b638520ec37f-config\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744344 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bcq8w\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") " pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744368 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0c31760-2506-4e99-b6ee-b072b77d60f7-config\") pod \"kube-apiserver-operator-766d6c64bb-c6tw5\" (UID: \"f0c31760-2506-4e99-b6ee-b072b77d60f7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744389 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/17cb3856-2d7a-49ba-8488-1b15832b26a3-metrics-tls\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744408 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae58b080-edff-4582-839f-fc67d5b0b981-service-ca-bundle\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744429 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqz78\" (UniqueName: \"kubernetes.io/projected/ae58b080-edff-4582-839f-fc67d5b0b981-kube-api-access-kqz78\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" 
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744455 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2e426879-bc45-4bad-8353-fe9be602ffb2-srv-cert\") pod \"catalog-operator-68c6474976-s4wrk\" (UID: \"2e426879-bc45-4bad-8353-fe9be602ffb2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744499 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmt89\" (UniqueName: \"kubernetes.io/projected/eff606a7-5cf1-461f-a63f-225dc013ef4b-kube-api-access-vmt89\") pod \"olm-operator-6b444d44fb-4hf57\" (UID: \"eff606a7-5cf1-461f-a63f-225dc013ef4b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744522 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c09cae21-e7d6-4726-8cc3-44d49ddcf202-config\") pod \"kube-controller-manager-operator-78b949d7b-7czmn\" (UID: \"c09cae21-e7d6-4726-8cc3-44d49ddcf202\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744543 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnjvl\" (UniqueName: \"kubernetes.io/projected/17cb3856-2d7a-49ba-8488-1b15832b26a3-kube-api-access-mnjvl\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744566 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-config-volume\") pod \"collect-profiles-29396445-9b9j9\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744598 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/dd9b5929-4c7a-4598-922b-a5a37c9f4443-node-bootstrap-token\") pod \"machine-config-server-22sb9\" (UID: \"dd9b5929-4c7a-4598-922b-a5a37c9f4443\") " pod="openshift-machine-config-operator/machine-config-server-22sb9"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744622 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k2wp\" (UniqueName: \"kubernetes.io/projected/6bd32efc-1948-4c04-bd33-eafa2ea7417c-kube-api-access-7k2wp\") pod \"package-server-manager-789f6589d5-h6gh5\" (UID: \"6bd32efc-1948-4c04-bd33-eafa2ea7417c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744642 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7226f25-d408-493a-a5bd-634480a933ad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zjh6q\" (UID: \"c7226f25-d408-493a-a5bd-634480a933ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744717 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbd8x\" (UniqueName: \"kubernetes.io/projected/89163a77-fa00-44c4-aa83-969d24886d65-kube-api-access-gbd8x\") pod \"service-ca-9c57cc56f-vgmn6\" (UID: \"89163a77-fa00-44c4-aa83-969d24886d65\") " pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744743 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f863054-d2f5-4952-ab56-026b3a2bf341-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t9v4g\" (UID: \"9f863054-d2f5-4952-ab56-026b3a2bf341\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744747 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f69db977-2a9e-4f6b-8bf9-1abfd73b373f-config-volume\") pod \"dns-default-qrscw\" (UID: \"f69db977-2a9e-4f6b-8bf9-1abfd73b373f\") " pod="openshift-dns/dns-default-qrscw"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744790 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph7kf\" (UniqueName: \"kubernetes.io/projected/a31ec4c4-1be7-4631-9611-3886eecd09fb-kube-api-access-ph7kf\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744812 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/eff606a7-5cf1-461f-a63f-225dc013ef4b-srv-cert\") pod \"olm-operator-6b444d44fb-4hf57\" (UID: \"eff606a7-5cf1-461f-a63f-225dc013ef4b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744869 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-csi-data-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744888 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/17cb3856-2d7a-49ba-8488-1b15832b26a3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744913 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c6c1aea-8440-4112-ac10-b638520ec37f-serving-cert\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744933 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c09cae21-e7d6-4726-8cc3-44d49ddcf202-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-7czmn\" (UID: \"c09cae21-e7d6-4726-8cc3-44d49ddcf202\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744955 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be4dab2f-0e4f-464f-8a71-5fe7baa561a5-config\") pod \"service-ca-operator-777779d784-6rhjl\" (UID: \"be4dab2f-0e4f-464f-8a71-5fe7baa561a5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.744979 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-auth-proxy-config\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745001 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8c6c1aea-8440-4112-ac10-b638520ec37f-etcd-client\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745032 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/6bd32efc-1948-4c04-bd33-eafa2ea7417c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h6gh5\" (UID: \"6bd32efc-1948-4c04-bd33-eafa2ea7417c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745058 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbsgr\" (UniqueName: \"kubernetes.io/projected/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-kube-api-access-fbsgr\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745082 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9qrc\" (UniqueName: \"kubernetes.io/projected/2e426879-bc45-4bad-8353-fe9be602ffb2-kube-api-access-l9qrc\") pod \"catalog-operator-68c6474976-s4wrk\" (UID: \"2e426879-bc45-4bad-8353-fe9be602ffb2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745106 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33fef7f7-8e0c-475b-9e5b-8b3eab7b609c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-6qmls\" (UID: \"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745133 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vdqw\" (UniqueName: \"kubernetes.io/projected/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-kube-api-access-6vdqw\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745172 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87tfl\" (UniqueName: \"kubernetes.io/projected/dd9b5929-4c7a-4598-922b-a5a37c9f4443-kube-api-access-87tfl\") pod \"machine-config-server-22sb9\" (UID: \"dd9b5929-4c7a-4598-922b-a5a37c9f4443\") " pod="openshift-machine-config-operator/machine-config-server-22sb9"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745195 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4s2mz\" (UniqueName: \"kubernetes.io/projected/00104340-651c-488e-ae76-53e040f58218-kube-api-access-4s2mz\") pod \"machine-config-controller-84d6567774-w7xl7\" (UID: \"00104340-651c-488e-ae76-53e040f58218\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745216 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-registration-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745263 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/8c6c1aea-8440-4112-ac10-b638520ec37f-etcd-ca\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745296 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4n29\" (UniqueName: \"kubernetes.io/projected/c7880b3d-8bca-4ad5-a645-0d043cc71538-kube-api-access-l4n29\") pod \"ingress-canary-ljtvf\" (UID: \"c7880b3d-8bca-4ad5-a645-0d043cc71538\") " pod="openshift-ingress-canary/ingress-canary-ljtvf"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745325 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qnmz\" (UniqueName: \"kubernetes.io/projected/f69db977-2a9e-4f6b-8bf9-1abfd73b373f-kube-api-access-9qnmz\") pod \"dns-default-qrscw\" (UID: \"f69db977-2a9e-4f6b-8bf9-1abfd73b373f\") " pod="openshift-dns/dns-default-qrscw"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745353 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-plugins-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745377 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2e426879-bc45-4bad-8353-fe9be602ffb2-profile-collector-cert\") pod \"catalog-operator-68c6474976-s4wrk\" (UID: \"2e426879-bc45-4bad-8353-fe9be602ffb2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745397 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/ae58b080-edff-4582-839f-fc67d5b0b981-stats-auth\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745418 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f69db977-2a9e-4f6b-8bf9-1abfd73b373f-metrics-tls\") pod \"dns-default-qrscw\" (UID: \"f69db977-2a9e-4f6b-8bf9-1abfd73b373f\") " pod="openshift-dns/dns-default-qrscw"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745438 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-socket-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745691 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0c31760-2506-4e99-b6ee-b072b77d60f7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-c6tw5\" (UID: \"f0c31760-2506-4e99-b6ee-b072b77d60f7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745715 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c7880b3d-8bca-4ad5-a645-0d043cc71538-cert\") pod \"ingress-canary-ljtvf\" (UID: \"c7880b3d-8bca-4ad5-a645-0d043cc71538\") " pod="openshift-ingress-canary/ingress-canary-ljtvf"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745744 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33fef7f7-8e0c-475b-9e5b-8b3eab7b609c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-6qmls\" (UID: \"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745782 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745839 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dzm9\" (UniqueName: \"kubernetes.io/projected/8c6c1aea-8440-4112-ac10-b638520ec37f-kube-api-access-2dzm9\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745869 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be4dab2f-0e4f-464f-8a71-5fe7baa561a5-serving-cert\") pod \"service-ca-operator-777779d784-6rhjl\" (UID: \"be4dab2f-0e4f-464f-8a71-5fe7baa561a5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745913 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a31ec4c4-1be7-4631-9611-3886eecd09fb-apiservice-cert\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745928 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c6c1aea-8440-4112-ac10-b638520ec37f-config\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745941 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ae58b080-edff-4582-839f-fc67d5b0b981-metrics-certs\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.745998 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/dd9b5929-4c7a-4598-922b-a5a37c9f4443-certs\") pod \"machine-config-server-22sb9\" (UID: \"dd9b5929-4c7a-4598-922b-a5a37c9f4443\") " pod="openshift-machine-config-operator/machine-config-server-22sb9"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746032 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-proxy-tls\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746067 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/89163a77-fa00-44c4-aa83-969d24886d65-signing-key\") pod \"service-ca-9c57cc56f-vgmn6\" (UID: \"89163a77-fa00-44c4-aa83-969d24886d65\") " pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746097 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-images\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746132 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjs2t\" (UniqueName: \"kubernetes.io/projected/9f863054-d2f5-4952-ab56-026b3a2bf341-kube-api-access-qjs2t\") pod \"multus-admission-controller-857f4d67dd-t9v4g\" (UID: \"9f863054-d2f5-4952-ab56-026b3a2bf341\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746188 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/89163a77-fa00-44c4-aa83-969d24886d65-signing-cabundle\") pod \"service-ca-9c57cc56f-vgmn6\" (UID: \"89163a77-fa00-44c4-aa83-969d24886d65\") " pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746225 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksgqs\" (UniqueName: \"kubernetes.io/projected/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-kube-api-access-ksgqs\") pod \"collect-profiles-29396445-9b9j9\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746298 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a31ec4c4-1be7-4631-9611-3886eecd09fb-webhook-cert\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746336 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7226f25-d408-493a-a5bd-634480a933ad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zjh6q\" (UID: \"c7226f25-d408-493a-a5bd-634480a933ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746374 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/00104340-651c-488e-ae76-53e040f58218-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-w7xl7\" (UID: \"00104340-651c-488e-ae76-53e040f58218\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.746408 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c09cae21-e7d6-4726-8cc3-44d49ddcf202-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-7czmn\" (UID: \"c09cae21-e7d6-4726-8cc3-44d49ddcf202\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.747694 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/89163a77-fa00-44c4-aa83-969d24886d65-signing-cabundle\") pod \"service-ca-9c57cc56f-vgmn6\" (UID: \"89163a77-fa00-44c4-aa83-969d24886d65\") " pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.748860 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-images\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.750639 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-plugins-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.752298 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a31ec4c4-1be7-4631-9611-3886eecd09fb-webhook-cert\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.752553 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7226f25-d408-493a-a5bd-634480a933ad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zjh6q\" (UID: \"c7226f25-d408-493a-a5bd-634480a933ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.753058 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bcq8w\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") " pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.753551 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0c31760-2506-4e99-b6ee-b072b77d60f7-config\") pod \"kube-apiserver-operator-766d6c64bb-c6tw5\" (UID: \"f0c31760-2506-4e99-b6ee-b072b77d60f7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.753982 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-proxy-tls\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.754064 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-mountpoint-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.754705 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/8c6c1aea-8440-4112-ac10-b638520ec37f-etcd-service-ca\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.755876 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae58b080-edff-4582-839f-fc67d5b0b981-service-ca-bundle\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv"
Nov 22 04:49:01 crc kubenswrapper[4948]: E1122 04:49:01.756385 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.256361427 +0000 UTC m=+144.942372013 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.757622 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/00104340-651c-488e-ae76-53e040f58218-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-w7xl7\" (UID: \"00104340-651c-488e-ae76-53e040f58218\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.757925 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.758865 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c09cae21-e7d6-4726-8cc3-44d49ddcf202-config\") pod \"kube-controller-manager-operator-78b949d7b-7czmn\" (UID: \"c09cae21-e7d6-4726-8cc3-44d49ddcf202\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.760188 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/17cb3856-2d7a-49ba-8488-1b15832b26a3-trusted-ca\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.760996 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-registration-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.761607 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/8c6c1aea-8440-4112-ac10-b638520ec37f-etcd-ca\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.762726 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a31ec4c4-1be7-4631-9611-3886eecd09fb-tmpfs\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.762724 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-auth-proxy-config\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.762824 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be4dab2f-0e4f-464f-8a71-5fe7baa561a5-config\") pod \"service-ca-operator-777779d784-6rhjl\" (UID: \"be4dab2f-0e4f-464f-8a71-5fe7baa561a5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.763507 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-socket-dir\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.763459 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-config-volume\") pod \"collect-profiles-29396445-9b9j9\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.764410 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33fef7f7-8e0c-475b-9e5b-8b3eab7b609c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-6qmls\" (UID: \"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.767689 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7226f25-d408-493a-a5bd-634480a933ad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zjh6q\" (UID: \"c7226f25-d408-493a-a5bd-634480a933ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.774631 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-secret-volume\") pod \"collect-profiles-29396445-9b9j9\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.778030 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/17cb3856-2d7a-49ba-8488-1b15832b26a3-metrics-tls\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.778304 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0c31760-2506-4e99-b6ee-b072b77d60f7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-c6tw5\" (UID: \"f0c31760-2506-4e99-b6ee-b072b77d60f7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5"
Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.778350 4948 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/dd9b5929-4c7a-4598-922b-a5a37c9f4443-node-bootstrap-token\") pod \"machine-config-server-22sb9\" (UID: \"dd9b5929-4c7a-4598-922b-a5a37c9f4443\") " pod="openshift-machine-config-operator/machine-config-server-22sb9" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.778731 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be4dab2f-0e4f-464f-8a71-5fe7baa561a5-serving-cert\") pod \"service-ca-operator-777779d784-6rhjl\" (UID: \"be4dab2f-0e4f-464f-8a71-5fe7baa561a5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.778732 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8c6c1aea-8440-4112-ac10-b638520ec37f-etcd-client\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.778965 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/dd9b5929-4c7a-4598-922b-a5a37c9f4443-certs\") pod \"machine-config-server-22sb9\" (UID: \"dd9b5929-4c7a-4598-922b-a5a37c9f4443\") " pod="openshift-machine-config-operator/machine-config-server-22sb9" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.779121 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/ae58b080-edff-4582-839f-fc67d5b0b981-default-certificate\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.782290 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/89163a77-fa00-44c4-aa83-969d24886d65-signing-key\") pod \"service-ca-9c57cc56f-vgmn6\" (UID: \"89163a77-fa00-44c4-aa83-969d24886d65\") " pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.784739 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2e426879-bc45-4bad-8353-fe9be602ffb2-profile-collector-cert\") pod \"catalog-operator-68c6474976-s4wrk\" (UID: \"2e426879-bc45-4bad-8353-fe9be602ffb2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.784863 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f69db977-2a9e-4f6b-8bf9-1abfd73b373f-metrics-tls\") pod \"dns-default-qrscw\" (UID: \"f69db977-2a9e-4f6b-8bf9-1abfd73b373f\") " pod="openshift-dns/dns-default-qrscw" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.785425 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33fef7f7-8e0c-475b-9e5b-8b3eab7b609c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-6qmls\" (UID: \"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" Nov 22 04:49:01 crc 
kubenswrapper[4948]: I1122 04:49:01.785902 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c6c1aea-8440-4112-ac10-b638520ec37f-serving-cert\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.785914 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2e426879-bc45-4bad-8353-fe9be602ffb2-srv-cert\") pod \"catalog-operator-68c6474976-s4wrk\" (UID: \"2e426879-bc45-4bad-8353-fe9be602ffb2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.786922 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c7880b3d-8bca-4ad5-a645-0d043cc71538-cert\") pod \"ingress-canary-ljtvf\" (UID: \"c7880b3d-8bca-4ad5-a645-0d043cc71538\") " pod="openshift-ingress-canary/ingress-canary-ljtvf" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.787016 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a31ec4c4-1be7-4631-9611-3886eecd09fb-apiservice-cert\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.787430 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/ae58b080-edff-4582-839f-fc67d5b0b981-stats-auth\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.788633 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ae58b080-edff-4582-839f-fc67d5b0b981-metrics-certs\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.788839 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c09cae21-e7d6-4726-8cc3-44d49ddcf202-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-7czmn\" (UID: \"c09cae21-e7d6-4726-8cc3-44d49ddcf202\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.789164 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f863054-d2f5-4952-ab56-026b3a2bf341-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t9v4g\" (UID: \"9f863054-d2f5-4952-ab56-026b3a2bf341\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.791186 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/eff606a7-5cf1-461f-a63f-225dc013ef4b-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4hf57\" (UID: \"eff606a7-5cf1-461f-a63f-225dc013ef4b\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.791277 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/00104340-651c-488e-ae76-53e040f58218-proxy-tls\") pod \"machine-config-controller-84d6567774-w7xl7\" (UID: \"00104340-651c-488e-ae76-53e040f58218\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.791336 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bcq8w\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") " pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.793056 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/eff606a7-5cf1-461f-a63f-225dc013ef4b-srv-cert\") pod \"olm-operator-6b444d44fb-4hf57\" (UID: \"eff606a7-5cf1-461f-a63f-225dc013ef4b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.794185 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv2qs\" (UniqueName: \"kubernetes.io/projected/024d4cbd-d082-4074-a271-b01445f26510-kube-api-access-jv2qs\") pod \"openshift-apiserver-operator-796bbdcf4f-f4mhc\" (UID: \"024d4cbd-d082-4074-a271-b01445f26510\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.794445 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/6bd32efc-1948-4c04-bd33-eafa2ea7417c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h6gh5\" (UID: \"6bd32efc-1948-4c04-bd33-eafa2ea7417c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.794584 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/09733b67-9323-460c-ab8e-e55fbaf31542-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-h4z45\" (UID: \"09733b67-9323-460c-ab8e-e55fbaf31542\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.796543 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ww787\" (UniqueName: \"kubernetes.io/projected/1055fda6-8601-43b2-a82c-0dc3ddedc96a-kube-api-access-ww787\") pod \"authentication-operator-69f744f599-ttcbt\" (UID: \"1055fda6-8601-43b2-a82c-0dc3ddedc96a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.798865 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q68pt\" (UniqueName: \"kubernetes.io/projected/fdc54e28-5b97-48c7-824a-3ed65296e98e-kube-api-access-q68pt\") pod \"dns-operator-744455d44c-w8qrc\" (UID: \"fdc54e28-5b97-48c7-824a-3ed65296e98e\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.823829 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7ztq\" (UniqueName: \"kubernetes.io/projected/3ce40099-332b-49b0-8eee-914df6a6a572-kube-api-access-p7ztq\") pod \"console-f9d7485db-kmbfh\" (UID: \"3ce40099-332b-49b0-8eee-914df6a6a572\") " pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.847945 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:01 crc kubenswrapper[4948]: E1122 04:49:01.848610 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.348590766 +0000 UTC m=+145.034601302 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.873361 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lhld\" (UniqueName: \"kubernetes.io/projected/104266b0-fe3d-4831-8853-e0f964dee743-kube-api-access-2lhld\") pod \"cluster-samples-operator-665b6dd947-5tvbr\" (UID: \"104266b0-fe3d-4831-8853-e0f964dee743\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.882353 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kzvh\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-kube-api-access-6kzvh\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.892528 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.897722 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8f7g\" (UniqueName: \"kubernetes.io/projected/1682ca74-e62f-492c-8de6-fa0fc27c5b39-kube-api-access-n8f7g\") pod \"apiserver-76f77b778f-xkfvj\" (UID: \"1682ca74-e62f-492c-8de6-fa0fc27c5b39\") " pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.920088 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.921798 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-bound-sa-token\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.943532 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c7226f25-d408-493a-a5bd-634480a933ad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-zjh6q\" (UID: \"c7226f25-d408-493a-a5bd-634480a933ad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.945928 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.955231 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.955397 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-fznqf"] Nov 22 04:49:01 crc kubenswrapper[4948]: E1122 04:49:01.955610 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.455593652 +0000 UTC m=+145.141604258 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.956824 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.957632 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c09cae21-e7d6-4726-8cc3-44d49ddcf202-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-7czmn\" (UID: \"c09cae21-e7d6-4726-8cc3-44d49ddcf202\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.970111 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.975440 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcn6l\" (UniqueName: \"kubernetes.io/projected/19e41764-1439-4a74-a6ce-ffacb3448577-kube-api-access-zcn6l\") pod \"migrator-59844c95c7-78blh\" (UID: \"19e41764-1439-4a74-a6ce-ffacb3448577\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh" Nov 22 04:49:01 crc kubenswrapper[4948]: W1122 04:49:01.984295 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a821dfa_73f0_4d83_b480_f566a1ce12fc.slice/crio-284809c4d061cef6f1f66ab634a3275d9b8dba31b20f1124979d8df08b53db50 WatchSource:0}: Error finding container 284809c4d061cef6f1f66ab634a3275d9b8dba31b20f1124979d8df08b53db50: Status 404 returned error can't find the container with id 284809c4d061cef6f1f66ab634a3275d9b8dba31b20f1124979d8df08b53db50 Nov 22 04:49:01 crc kubenswrapper[4948]: I1122 04:49:01.999683 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjs2t\" (UniqueName: \"kubernetes.io/projected/9f863054-d2f5-4952-ab56-026b3a2bf341-kube-api-access-qjs2t\") pod \"multus-admission-controller-857f4d67dd-t9v4g\" (UID: \"9f863054-d2f5-4952-ab56-026b3a2bf341\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.016811 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksgqs\" (UniqueName: \"kubernetes.io/projected/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-kube-api-access-ksgqs\") pod \"collect-profiles-29396445-9b9j9\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.022128 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.028939 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.038242 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dzm9\" (UniqueName: \"kubernetes.io/projected/8c6c1aea-8440-4112-ac10-b638520ec37f-kube-api-access-2dzm9\") pod \"etcd-operator-b45778765-n4hqv\" (UID: \"8c6c1aea-8440-4112-ac10-b638520ec37f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.045560 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.048655 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.056239 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.056698 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.556680751 +0000 UTC m=+145.242691267 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.059017 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bbss\" (UniqueName: \"kubernetes.io/projected/aaf64a47-9260-4ab8-83da-238e80d4965b-kube-api-access-6bbss\") pod \"marketplace-operator-79b997595-bcq8w\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") " pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.069448 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.079635 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f0c31760-2506-4e99-b6ee-b072b77d60f7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-c6tw5\" (UID: \"f0c31760-2506-4e99-b6ee-b072b77d60f7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.081922 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.083729 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-nfpm6"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.098801 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k2wp\" (UniqueName: \"kubernetes.io/projected/6bd32efc-1948-4c04-bd33-eafa2ea7417c-kube-api-access-7k2wp\") pod \"package-server-manager-789f6589d5-h6gh5\" (UID: \"6bd32efc-1948-4c04-bd33-eafa2ea7417c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.120949 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2vbhk"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.133894 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqz78\" (UniqueName: \"kubernetes.io/projected/ae58b080-edff-4582-839f-fc67d5b0b981-kube-api-access-kqz78\") pod \"router-default-5444994796-h8wxv\" (UID: \"ae58b080-edff-4582-839f-fc67d5b0b981\") " pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.146088 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.146845 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/17cb3856-2d7a-49ba-8488-1b15832b26a3-bound-sa-token\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.157259 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.157681 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.657669568 +0000 UTC m=+145.343680084 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.161795 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbd8x\" (UniqueName: \"kubernetes.io/projected/89163a77-fa00-44c4-aa83-969d24886d65-kube-api-access-gbd8x\") pod \"service-ca-9c57cc56f-vgmn6\" (UID: \"89163a77-fa00-44c4-aa83-969d24886d65\") " pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.162043 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.162183 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.182058 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-mgqq5"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.189557 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmt89\" (UniqueName: \"kubernetes.io/projected/eff606a7-5cf1-461f-a63f-225dc013ef4b-kube-api-access-vmt89\") pod \"olm-operator-6b444d44fb-4hf57\" (UID: \"eff606a7-5cf1-461f-a63f-225dc013ef4b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.192090 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.193979 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-lpvjc"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.199937 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnjvl\" (UniqueName: \"kubernetes.io/projected/17cb3856-2d7a-49ba-8488-1b15832b26a3-kube-api-access-mnjvl\") pod \"ingress-operator-5b745b69d9-gld76\" (UID: \"17cb3856-2d7a-49ba-8488-1b15832b26a3\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:49:02 crc kubenswrapper[4948]: W1122 04:49:02.204390 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024d4cbd_d082_4074_a271_b01445f26510.slice/crio-a7f0e16511f488447d1a67f147330f1afd36c228893a73eaa61d8a3c34fb8963 WatchSource:0}: Error finding container a7f0e16511f488447d1a67f147330f1afd36c228893a73eaa61d8a3c34fb8963: Status 404 returned error can't find the container with id a7f0e16511f488447d1a67f147330f1afd36c228893a73eaa61d8a3c34fb8963 Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.208030 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.209765 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.213278 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.215953 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.219591 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-w8qrc"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.220646 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.223683 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5scfj\" (UniqueName: \"kubernetes.io/projected/33fef7f7-8e0c-475b-9e5b-8b3eab7b609c-kube-api-access-5scfj\") pod \"kube-storage-version-migrator-operator-b67b599dd-6qmls\" (UID: \"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.227786 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.240390 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4s2mz\" (UniqueName: \"kubernetes.io/projected/00104340-651c-488e-ae76-53e040f58218-kube-api-access-4s2mz\") pod \"machine-config-controller-84d6567774-w7xl7\" (UID: \"00104340-651c-488e-ae76-53e040f58218\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.256429 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbsgr\" (UniqueName: \"kubernetes.io/projected/fd10d6c8-51cb-438e-91b8-3ddaf3c3733d-kube-api-access-fbsgr\") pod \"csi-hostpathplugin-5wl5s\" (UID: \"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d\") " pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.258939 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.259434 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.759415446 +0000 UTC m=+145.445425962 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.264267 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:02 crc kubenswrapper[4948]: W1122 04:49:02.264599 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod166e874d_66a8_4f45_9290_43b3578139a6.slice/crio-76bd7d72e7bddce874757fe818908731a81623cd0daa18be49608238340c2d07 WatchSource:0}: Error finding container 76bd7d72e7bddce874757fe818908731a81623cd0daa18be49608238340c2d07: Status 404 returned error can't find the container with id 76bd7d72e7bddce874757fe818908731a81623cd0daa18be49608238340c2d07 Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.276574 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.282505 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.284746 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9qrc\" (UniqueName: \"kubernetes.io/projected/2e426879-bc45-4bad-8353-fe9be602ffb2-kube-api-access-l9qrc\") pod \"catalog-operator-68c6474976-s4wrk\" (UID: \"2e426879-bc45-4bad-8353-fe9be602ffb2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.292108 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.295091 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.297430 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4n29\" (UniqueName: \"kubernetes.io/projected/c7880b3d-8bca-4ad5-a645-0d043cc71538-kube-api-access-l4n29\") pod \"ingress-canary-ljtvf\" (UID: \"c7880b3d-8bca-4ad5-a645-0d043cc71538\") " pod="openshift-ingress-canary/ingress-canary-ljtvf" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.313603 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.318041 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ljtvf" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.318549 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qnmz\" (UniqueName: \"kubernetes.io/projected/f69db977-2a9e-4f6b-8bf9-1abfd73b373f-kube-api-access-9qnmz\") pod \"dns-default-qrscw\" (UID: \"f69db977-2a9e-4f6b-8bf9-1abfd73b373f\") " pod="openshift-dns/dns-default-qrscw" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.338307 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vdqw\" (UniqueName: \"kubernetes.io/projected/c2fa87ce-b218-4c18-96cb-1fe0b7e5a248-kube-api-access-6vdqw\") pod \"machine-config-operator-74547568cd-7678m\" (UID: \"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.360602 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.360993 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph7kf\" (UniqueName: \"kubernetes.io/projected/a31ec4c4-1be7-4631-9611-3886eecd09fb-kube-api-access-ph7kf\") pod \"packageserver-d55dfcdfc-c7vs6\" (UID: \"a31ec4c4-1be7-4631-9611-3886eecd09fb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.365634 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.865613189 +0000 UTC m=+145.551623715 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.377370 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zcg4\" (UniqueName: \"kubernetes.io/projected/09733b67-9323-460c-ab8e-e55fbaf31542-kube-api-access-4zcg4\") pod \"control-plane-machine-set-operator-78cbb6b69f-h4z45\" (UID: \"09733b67-9323-460c-ab8e-e55fbaf31542\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.377401 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-qrscw" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.391053 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.403042 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87tfl\" (UniqueName: \"kubernetes.io/projected/dd9b5929-4c7a-4598-922b-a5a37c9f4443-kube-api-access-87tfl\") pod \"machine-config-server-22sb9\" (UID: \"dd9b5929-4c7a-4598-922b-a5a37c9f4443\") " pod="openshift-machine-config-operator/machine-config-server-22sb9" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.403264 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.415903 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnwj2\" (UniqueName: \"kubernetes.io/projected/be4dab2f-0e4f-464f-8a71-5fe7baa561a5-kube-api-access-gnwj2\") pod \"service-ca-operator-777779d784-6rhjl\" (UID: \"be4dab2f-0e4f-464f-8a71-5fe7baa561a5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.452701 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.461882 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.462034 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.962008666 +0000 UTC m=+145.648019182 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.462152 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.462439 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:02.962425228 +0000 UTC m=+145.648435744 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.468273 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.476734 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.497027 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.500891 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-22sb9" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.514557 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.537773 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.540925 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.551111 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ttcbt"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.562844 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.563217 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.063199068 +0000 UTC m=+145.749209584 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.596594 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-xkfvj"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.623106 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.628860 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-kmbfh"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.658606 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t9v4g"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.664398 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.664830 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.164808802 +0000 UTC m=+145.850819388 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.716541 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.717774 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.720991 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" event={"ID":"fdc54e28-5b97-48c7-824a-3ed65296e98e","Type":"ContainerStarted","Data":"5ce7f450a4da59ea176a6820c741d18af29984e3ede000af9058d8fa2055283a"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.722981 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-nfpm6" event={"ID":"8c30c504-f3c7-4f34-878d-33fdbb884ffe","Type":"ContainerStarted","Data":"b78a085579d4b88745c831f8933916f6d06780eef4686330d202b890c82b7d25"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.724770 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" event={"ID":"656450bf-ce50-4fc8-863e-274359778f85","Type":"ContainerStarted","Data":"5b6840ed5b8219dbb88bb6f6708337b5b1d36168b6d353ee44b4cd0238e4a279"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.733034 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-lpvjc" event={"ID":"c74bf710-a8fa-4d55-9f68-771f56c145f7","Type":"ContainerStarted","Data":"943347234b98f0610882a20af382b3330493151737ea53861d29d1d049a5a436"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.733964 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" event={"ID":"005ccf51-a81d-4fa0-b9ac-468732a14edf","Type":"ContainerStarted","Data":"dfeef2bc11bb71729c594702ac8e09070df3e96390eb846381131ccd427f2e70"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.739244 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" event={"ID":"166e874d-66a8-4f45-9290-43b3578139a6","Type":"ContainerStarted","Data":"76bd7d72e7bddce874757fe818908731a81623cd0daa18be49608238340c2d07"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.740947 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" event={"ID":"b5f1f7f2-3a3e-464d-84f7-69e726b785a7","Type":"ContainerStarted","Data":"df2d33c33314701684ac1e48dcb516ed56454e4af821c616fde6da02e5c93fcb"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.747992 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" 
event={"ID":"024d4cbd-d082-4074-a271-b01445f26510","Type":"ContainerStarted","Data":"a7f0e16511f488447d1a67f147330f1afd36c228893a73eaa61d8a3c34fb8963"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.749581 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" event={"ID":"9a821dfa-73f0-4d83-b480-f566a1ce12fc","Type":"ContainerStarted","Data":"284809c4d061cef6f1f66ab634a3275d9b8dba31b20f1124979d8df08b53db50"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.751026 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" event={"ID":"45a847ab-7976-4dbe-9ccc-5c89490b7c52","Type":"ContainerStarted","Data":"7f13ef20180fd70e4c6715dbc9ca40ed03e88537320784e1a41952f080a96303"} Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.752125 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" event={"ID":"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0","Type":"ContainerStarted","Data":"44ddca14475f582a42580fd6d35a9ebce20bdcc581471d9fe5b6a1239e189157"} Nov 22 04:49:02 crc kubenswrapper[4948]: W1122 04:49:02.759380 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae58b080_edff_4582_839f_fc67d5b0b981.slice/crio-15ebd167213be44d9a50f0a360ef9023f1d0beb09596507aa118e7b5fe1fdb65 WatchSource:0}: Error finding container 15ebd167213be44d9a50f0a360ef9023f1d0beb09596507aa118e7b5fe1fdb65: Status 404 returned error can't find the container with id 15ebd167213be44d9a50f0a360ef9023f1d0beb09596507aa118e7b5fe1fdb65 Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.765517 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.765703 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.265677505 +0000 UTC m=+145.951688021 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.765809 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.766154 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.266142848 +0000 UTC m=+145.952153364 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.776487 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-n4hqv"] Nov 22 04:49:02 crc kubenswrapper[4948]: W1122 04:49:02.827998 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc09cae21_e7d6_4726_8cc3_44d49ddcf202.slice/crio-ad0ddc99a31b30c3f74c652f8084b51e93b22322ef3a5020683ab270d0230581 WatchSource:0}: Error finding container ad0ddc99a31b30c3f74c652f8084b51e93b22322ef3a5020683ab270d0230581: Status 404 returned error can't find the container with id ad0ddc99a31b30c3f74c652f8084b51e93b22322ef3a5020683ab270d0230581 Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.869201 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.869416 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.369392478 +0000 UTC m=+146.055402994 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.869732 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.870201 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.370185891 +0000 UTC m=+146.056196407 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.899940 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q"] Nov 22 04:49:02 crc kubenswrapper[4948]: I1122 04:49:02.972618 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:02 crc kubenswrapper[4948]: E1122 04:49:02.972985 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.472971298 +0000 UTC m=+146.158981814 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.062221 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bcq8w"] Nov 22 04:49:03 crc kubenswrapper[4948]: W1122 04:49:03.066260 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7226f25_d408_493a_a5bd_634480a933ad.slice/crio-c3193532522aec11e393f307af52aeb17615ca493460cf28b3349922431c3df7 WatchSource:0}: Error finding container c3193532522aec11e393f307af52aeb17615ca493460cf28b3349922431c3df7: Status 404 returned error can't find the container with id c3193532522aec11e393f307af52aeb17615ca493460cf28b3349922431c3df7 Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.076592 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.076888 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.576878216 +0000 UTC m=+146.262888732 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.182394 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.182824 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.682788741 +0000 UTC m=+146.368799257 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.183155 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.183572 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.683552223 +0000 UTC m=+146.369562799 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.279129 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh"] Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.290913 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.291048 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.791029402 +0000 UTC m=+146.477039918 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.291583 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.291881 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.791869026 +0000 UTC m=+146.477879542 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: W1122 04:49:03.333839 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd9b5929_4c7a_4598_922b_a5a37c9f4443.slice/crio-33953d574e4751123a096dda641f7346aebd2a707d741d455ae3a1c8dc08b211 WatchSource:0}: Error finding container 33953d574e4751123a096dda641f7346aebd2a707d741d455ae3a1c8dc08b211: Status 404 returned error can't find the container with id 33953d574e4751123a096dda641f7346aebd2a707d741d455ae3a1c8dc08b211 Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.336642 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9"] Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.385760 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5"] Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.392833 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.401743 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:03.901689921 +0000 UTC m=+146.587700437 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.411391 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57"] Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.502991 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.503356 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.003340406 +0000 UTC m=+146.689350922 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.603899 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.603997 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.103972343 +0000 UTC m=+146.789982859 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.604628 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.604963 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.10494022 +0000 UTC m=+146.790950736 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.706107 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.706254 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.206229095 +0000 UTC m=+146.892239611 (durationBeforeRetry 500ms). 
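[annotation] The E-records from nestedpendingoperations.go show the other half of the loop: each failed volume operation is stamped with an earliest-retry time ("No retries permitted until <now+500ms> (durationBeforeRetry 500ms)"), keyed by volume and pod, which is why the same pair of errors recurs at roughly 100ms reconciler intervals but is only actually retried every 500ms. A sketch of that gate, assuming the fixed 500ms delay observed here (the real backoff can grow on repeated failures; opGate and its methods are invented names):

package main

import (
	"fmt"
	"time"
)

// opGate mimics the "no retries permitted until" bookkeeping logged by
// nestedpendingoperations above; all names here are illustrative.
type opGate struct {
	notBefore map[string]time.Time
	backoff   time.Duration
}

func newOpGate() *opGate {
	return &opGate{notBefore: map[string]time.Time{}, backoff: 500 * time.Millisecond}
}

// fail records a failed attempt for an operation key (volume+pod) and
// stamps the earliest time a retry may run, like durationBeforeRetry.
func (g *opGate) fail(key string, now time.Time) time.Time {
	t := now.Add(g.backoff)
	g.notBefore[key] = t
	return t
}

// mayRetry reports whether the reconciler is allowed to re-queue the
// operation yet; unknown keys are always allowed.
func (g *opGate) mayRetry(key string, now time.Time) bool {
	return now.After(g.notBefore[key])
}

func main() {
	g := newOpGate()
	key := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"

	now := time.Now()
	until := g.fail(key, now)
	fmt.Printf("No retries permitted until %s (durationBeforeRetry %s)\n",
		until.Format(time.RFC3339Nano), g.backoff)

	fmt.Println("retry allowed immediately?", g.mayRetry(key, now))                               // false
	fmt.Println("retry allowed after 500ms? ", g.mayRetry(key, now.Add(501*time.Millisecond))) // true
}

[end annotation]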
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.706381 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.706798 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.206738379 +0000 UTC m=+146.892748895 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.798210 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.798703 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.798719 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" event={"ID":"b5f1f7f2-3a3e-464d-84f7-69e726b785a7","Type":"ContainerStarted","Data":"08ad6ca8feb7d06263256a22f42de0fb4895a79e80b9a1633e94ba8a4e0d9190"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.798741 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" event={"ID":"eff606a7-5cf1-461f-a63f-225dc013ef4b","Type":"ContainerStarted","Data":"b53b0dce64921f11c1e06fd13f22fc82f4f31be9e2e96e4a17fb30320043ac22"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.798779 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" event={"ID":"104266b0-fe3d-4831-8853-e0f964dee743","Type":"ContainerStarted","Data":"fa977eae7273f34ac0500a6bf521674dc5997c8ad690327e0de552607095ee84"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.798795 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" event={"ID":"9f863054-d2f5-4952-ab56-026b3a2bf341","Type":"ContainerStarted","Data":"2b12171360db6ddf06be460c44d6a3fda3d252eb4b8641af7e280d935eeca93b"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 
04:49:03.798809 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" event={"ID":"45a847ab-7976-4dbe-9ccc-5c89490b7c52","Type":"ContainerStarted","Data":"d84eaec122ca7d2665a3189eab86133e059605cce2370cc7ac31994fac9d8c3e"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.798848 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kmbfh" event={"ID":"3ce40099-332b-49b0-8eee-914df6a6a572","Type":"ContainerStarted","Data":"f05d8392841f4598455649e64f75a39bce67a2c03ec07b552d25f1d8ea4f9a07"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.798862 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-nfpm6" event={"ID":"8c30c504-f3c7-4f34-878d-33fdbb884ffe","Type":"ContainerStarted","Data":"4994a1872b281884f77ca09791b6b67d221bec607085340023c54208f474b2f8"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.800511 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" event={"ID":"c09cae21-e7d6-4726-8cc3-44d49ddcf202","Type":"ContainerStarted","Data":"ad0ddc99a31b30c3f74c652f8084b51e93b22322ef3a5020683ab270d0230581"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.802313 4948 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-l7pw8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.802374 4948 patch_prober.go:28] interesting pod/console-operator-58897d9998-nfpm6 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.802396 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" podUID="45a847ab-7976-4dbe-9ccc-5c89490b7c52" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.802432 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-nfpm6" podUID="8c30c504-f3c7-4f34-878d-33fdbb884ffe" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.806900 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.807105 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-22 04:49:04.307071367 +0000 UTC m=+146.993081883 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.807162 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.807220 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" event={"ID":"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4","Type":"ContainerStarted","Data":"90ac4090590018efd1c496bc331e832e5e8d4938a2712b4e9d8c5561dd82f098"} Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.807484 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.307473948 +0000 UTC m=+146.993484464 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.813641 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" event={"ID":"1682ca74-e62f-492c-8de6-fa0fc27c5b39","Type":"ContainerStarted","Data":"969c14939e164e87757c064c3cc662346dc1bd817dc286e08da206a93257f64f"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.832745 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" event={"ID":"ba391380-ff01-461f-b4e7-daa1f18d0198","Type":"ContainerStarted","Data":"4f1be4ad3d1fa861f3ac651369bba81890d514db5fb8650934758f7981e010a5"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.835433 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" event={"ID":"656450bf-ce50-4fc8-863e-274359778f85","Type":"ContainerStarted","Data":"9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.836925 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.838569 4948 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-2vbhk container/oauth-openshift 
namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.28:6443/healthz\": dial tcp 10.217.0.28:6443: connect: connection refused" start-of-body= Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.838599 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" podUID="656450bf-ce50-4fc8-863e-274359778f85" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.28:6443/healthz\": dial tcp 10.217.0.28:6443: connect: connection refused" Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.840496 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" event={"ID":"1055fda6-8601-43b2-a82c-0dc3ddedc96a","Type":"ContainerStarted","Data":"f0388279db569041fb18034bae7c5c2d7d659d0a6785f23d8338cf6ca5299568"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.840519 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" event={"ID":"1055fda6-8601-43b2-a82c-0dc3ddedc96a","Type":"ContainerStarted","Data":"29b02a30a67839eea56327608c67d7c5f4927dd85d145aac730351e8bfd2db74"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.846060 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh" event={"ID":"19e41764-1439-4a74-a6ce-ffacb3448577","Type":"ContainerStarted","Data":"f6927c1a0dffb0c170249587e3634deca6b4431cc87b7b398229ccf2f418c87a"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.849561 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" event={"ID":"f0c31760-2506-4e99-b6ee-b072b77d60f7","Type":"ContainerStarted","Data":"0b5561668ebde19cac6381b4a47418b326c8e5da4a3f64141fdcb126867a745a"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.854841 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" event={"ID":"c7226f25-d408-493a-a5bd-634480a933ad","Type":"ContainerStarted","Data":"c3193532522aec11e393f307af52aeb17615ca493460cf28b3349922431c3df7"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.867522 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" event={"ID":"8c6c1aea-8440-4112-ac10-b638520ec37f","Type":"ContainerStarted","Data":"4f436385c5d2452346489ba036e043e38c870805044009562fad37f09b600e8b"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.872591 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-h8wxv" event={"ID":"ae58b080-edff-4582-839f-fc67d5b0b981","Type":"ContainerStarted","Data":"15ebd167213be44d9a50f0a360ef9023f1d0beb09596507aa118e7b5fe1fdb65"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.878797 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" event={"ID":"9a821dfa-73f0-4d83-b480-f566a1ce12fc","Type":"ContainerStarted","Data":"093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.879374 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:49:03 crc 
kubenswrapper[4948]: I1122 04:49:03.880746 4948 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-fznqf container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.880779 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" podUID="9a821dfa-73f0-4d83-b480-f566a1ce12fc" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.881043 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-22sb9" event={"ID":"dd9b5929-4c7a-4598-922b-a5a37c9f4443","Type":"ContainerStarted","Data":"33953d574e4751123a096dda641f7346aebd2a707d741d455ae3a1c8dc08b211"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.883003 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" event={"ID":"024d4cbd-d082-4074-a271-b01445f26510","Type":"ContainerStarted","Data":"91426d136daa9ea4de4f6b41441f827c86649a84e0db3c5687ec20e424620139"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.884225 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" event={"ID":"aaf64a47-9260-4ab8-83da-238e80d4965b","Type":"ContainerStarted","Data":"5b6191cdc488524c9206a37c1caa26159211ca15b85a0315f3645fe4f7e6de74"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.886767 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" event={"ID":"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0","Type":"ContainerStarted","Data":"3d183f23e958d255277e80f82165805b57d2ef01777b9f748d512e49af8c37be"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.888848 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" event={"ID":"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e","Type":"ContainerStarted","Data":"cc753dff5d2cec907887f4304b5ed2be70eb29dd4cda6d4eeaf7530057e1ec95"} Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.908140 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.908517 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.408498686 +0000 UTC m=+147.094509202 (durationBeforeRetry 500ms). 
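[annotation] The prober records interleaved here (route-controller-manager, console-operator, oauth-openshift, controller-manager) all fail the same way: the container was just reported ContainerStarted, the readiness probe is an HTTPS GET against the pod IP, and nothing is listening yet, so the dial returns "connection refused". A stripped-down Go version of one such check, illustrative only -- probeOnce is an invented helper, and the real prober additionally handles thresholds, redirects, and header options:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one HTTPS readiness check the way the failures
// above read: GET https://<podIP>:<port>/healthz, where any dial error
// or non-2xx/3xx status counts as a failed probe.
func probeOnce(url string) (ok bool, output string) {
	client := &http.Client{
		Timeout: 1 * time.Second,
		// Probes hit pod-serving certs that cannot be verified here,
		// so certificate verification is skipped in this sketch.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(url)
	if err != nil {
		return false, fmt.Sprintf("Get %q: %v", url, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return false, fmt.Sprintf("HTTP probe failed with statuscode: %d", resp.StatusCode)
	}
	return true, ""
}

func main() {
	// 10.217.0.9:8443 is the controller-manager endpoint from the log;
	// before the server binds its socket this yields "connection refused".
	if ok, out := probeOnce("https://10.217.0.9:8443/healthz"); !ok {
		fmt.Println("Probe failed:", out)
	}
}

These probe failures are transient startup noise: the same pods report readiness flips via "SyncLoop (probe)" once their servers bind. [end annotation]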
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.909417 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.910683 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" event={"ID":"166e874d-66a8-4f45-9290-43b3578139a6","Type":"ContainerStarted","Data":"7d5f007167c1b19e656e47e0619b4be40dc50048336ab55357cf3fb2fccb21b0"} Nov 22 04:49:03 crc kubenswrapper[4948]: E1122 04:49:03.911119 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.411104099 +0000 UTC m=+147.097114735 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:03 crc kubenswrapper[4948]: I1122 04:49:03.994337 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-7678m"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.010142 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.011090 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.511065417 +0000 UTC m=+147.197075933 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.031130 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.035753 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.044270 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-qrscw"] Nov 22 04:49:04 crc kubenswrapper[4948]: W1122 04:49:04.075541 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09733b67_9323_460c_ab8e_e55fbaf31542.slice/crio-c641ad478ac0e002db46812adc0e05ac563fe638b1c92111f4de842d056d60c0 WatchSource:0}: Error finding container c641ad478ac0e002db46812adc0e05ac563fe638b1c92111f4de842d056d60c0: Status 404 returned error can't find the container with id c641ad478ac0e002db46812adc0e05ac563fe638b1c92111f4de842d056d60c0 Nov 22 04:49:04 crc kubenswrapper[4948]: W1122 04:49:04.081214 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2fa87ce_b218_4c18_96cb_1fe0b7e5a248.slice/crio-de28b98983661087bd61a716bfa174c76a923131f1563f194fabc313ef15398f WatchSource:0}: Error finding container de28b98983661087bd61a716bfa174c76a923131f1563f194fabc313ef15398f: Status 404 returned error can't find the container with id de28b98983661087bd61a716bfa174c76a923131f1563f194fabc313ef15398f Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.107202 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.111366 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.111746 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.611730844 +0000 UTC m=+147.297741370 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.120739 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.133085 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vgmn6"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.178108 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-nfpm6" podStartSLOduration=125.178091255 podStartE2EDuration="2m5.178091255s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:04.177790156 +0000 UTC m=+146.863800672" watchObservedRunningTime="2025-11-22 04:49:04.178091255 +0000 UTC m=+146.864101771" Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.187028 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ljtvf"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.191673 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-5wl5s"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.201519 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk"] Nov 22 04:49:04 crc kubenswrapper[4948]: W1122 04:49:04.210603 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6bd32efc_1948_4c04_bd33_eafa2ea7417c.slice/crio-aad6c91cd222d1b7198b9c6ddf2a489520fcbbd6b4794bbadd9d471564ad1875 WatchSource:0}: Error finding container aad6c91cd222d1b7198b9c6ddf2a489520fcbbd6b4794bbadd9d471564ad1875: Status 404 returned error can't find the container with id aad6c91cd222d1b7198b9c6ddf2a489520fcbbd6b4794bbadd9d471564ad1875 Nov 22 04:49:04 crc kubenswrapper[4948]: W1122 04:49:04.210824 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89163a77_fa00_44c4_aa83_969d24886d65.slice/crio-18fcff98958adac0df8a9e7cb098fbcdf6bb360257d04e9952c644b763d331df WatchSource:0}: Error finding container 18fcff98958adac0df8a9e7cb098fbcdf6bb360257d04e9952c644b763d331df: Status 404 returned error can't find the container with id 18fcff98958adac0df8a9e7cb098fbcdf6bb360257d04e9952c644b763d331df Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.211977 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.216917 4948 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.716892848 +0000 UTC m=+147.402903364 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.218623 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.236619 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-ttcbt" podStartSLOduration=125.236601014 podStartE2EDuration="2m5.236601014s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:04.216739934 +0000 UTC m=+146.902750440" watchObservedRunningTime="2025-11-22 04:49:04.236601014 +0000 UTC m=+146.922611530" Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.238924 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.254612 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-gld76"] Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.267458 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-z8rdw" podStartSLOduration=125.267435213 podStartE2EDuration="2m5.267435213s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:04.259242092 +0000 UTC m=+146.945252608" watchObservedRunningTime="2025-11-22 04:49:04.267435213 +0000 UTC m=+146.953445729" Nov 22 04:49:04 crc kubenswrapper[4948]: W1122 04:49:04.290944 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7880b3d_8bca_4ad5_a645_0d043cc71538.slice/crio-e0a48a5c6267eedba0432dc30b03c3fa74d865a6e47f6ddca0ff8ef9d967e23d WatchSource:0}: Error finding container e0a48a5c6267eedba0432dc30b03c3fa74d865a6e47f6ddca0ff8ef9d967e23d: Status 404 returned error can't find the container with id e0a48a5c6267eedba0432dc30b03c3fa74d865a6e47f6ddca0ff8ef9d967e23d Nov 22 04:49:04 crc kubenswrapper[4948]: W1122 04:49:04.301225 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda31ec4c4_1be7_4631_9611_3886eecd09fb.slice/crio-5aac67720d1b8393e381d67812e5936510653131d34c8aab3d73e085f52da45f WatchSource:0}: Error finding container 5aac67720d1b8393e381d67812e5936510653131d34c8aab3d73e085f52da45f: Status 404 returned error can't find the container 
with id 5aac67720d1b8393e381d67812e5936510653131d34c8aab3d73e085f52da45f Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.308814 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" podStartSLOduration=124.308783568 podStartE2EDuration="2m4.308783568s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:04.301689548 +0000 UTC m=+146.987700064" watchObservedRunningTime="2025-11-22 04:49:04.308783568 +0000 UTC m=+146.994794084" Nov 22 04:49:04 crc kubenswrapper[4948]: W1122 04:49:04.312681 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e426879_bc45_4bad_8353_fe9be602ffb2.slice/crio-05ebcc9baf56f9a4c61e6c395287ec2570316670e45925094d7d3e78e601f686 WatchSource:0}: Error finding container 05ebcc9baf56f9a4c61e6c395287ec2570316670e45925094d7d3e78e601f686: Status 404 returned error can't find the container with id 05ebcc9baf56f9a4c61e6c395287ec2570316670e45925094d7d3e78e601f686 Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.317520 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.317870 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.817857204 +0000 UTC m=+147.503867720 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.341113 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" podStartSLOduration=125.341099019 podStartE2EDuration="2m5.341099019s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:04.338039893 +0000 UTC m=+147.024050409" watchObservedRunningTime="2025-11-22 04:49:04.341099019 +0000 UTC m=+147.027109535" Nov 22 04:49:04 crc kubenswrapper[4948]: W1122 04:49:04.367275 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17cb3856_2d7a_49ba_8488_1b15832b26a3.slice/crio-0f83ca0faef06e321d2bfc5526f2374a88132561ce818a7719dc58e360401a68 WatchSource:0}: Error finding container 0f83ca0faef06e321d2bfc5526f2374a88132561ce818a7719dc58e360401a68: Status 404 returned error can't find the container with id 0f83ca0faef06e321d2bfc5526f2374a88132561ce818a7719dc58e360401a68 Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.382392 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-f4mhc" podStartSLOduration=125.382371541 podStartE2EDuration="2m5.382371541s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:04.381734433 +0000 UTC m=+147.067744959" watchObservedRunningTime="2025-11-22 04:49:04.382371541 +0000 UTC m=+147.068382057" Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.424648 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.425118 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:04.925089205 +0000 UTC m=+147.611099721 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.425625 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" podStartSLOduration=125.425596299 podStartE2EDuration="2m5.425596299s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:04.423797549 +0000 UTC m=+147.109808065" watchObservedRunningTime="2025-11-22 04:49:04.425596299 +0000 UTC m=+147.111606815" Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.527771 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.528840 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.028823829 +0000 UTC m=+147.714834345 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.630397 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.630625 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.130599328 +0000 UTC m=+147.816609844 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.631027 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.631663 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.131650947 +0000 UTC m=+147.817661463 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.731746 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.732138 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.232124079 +0000 UTC m=+147.918134595 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.835252 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.836250 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.336235433 +0000 UTC m=+148.022245949 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.937045 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.937144 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" event={"ID":"714d2afb-6b01-4b25-bcb5-d6b0afd0d8f0","Type":"ContainerStarted","Data":"b3c8ff0607c4e5b16fb59e26026b78d3c1fc83932d93c74a8ed1253715c3e1c3"} Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.937343 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.437322133 +0000 UTC m=+148.123332659 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.938350 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:04 crc kubenswrapper[4948]: E1122 04:49:04.938864 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.438839765 +0000 UTC m=+148.124850271 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.944209 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" event={"ID":"a31ec4c4-1be7-4631-9611-3886eecd09fb","Type":"ContainerStarted","Data":"5aac67720d1b8393e381d67812e5936510653131d34c8aab3d73e085f52da45f"} Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.956577 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qg6ht" podStartSLOduration=125.956556085 podStartE2EDuration="2m5.956556085s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:04.954320762 +0000 UTC m=+147.640331278" watchObservedRunningTime="2025-11-22 04:49:04.956556085 +0000 UTC m=+147.642566601" Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.989857 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" event={"ID":"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4","Type":"ContainerStarted","Data":"e53ea476863cd748f42850c7d2de1e802d68288e91d5082ba3a6a125708c509d"} Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.993092 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" event={"ID":"ba391380-ff01-461f-b4e7-daa1f18d0198","Type":"ContainerStarted","Data":"b8b9252ffa8258057cac7c68c107820a8deaa2166ae863c1224989a9a65779ea"} Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.996419 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" 
event={"ID":"eff606a7-5cf1-461f-a63f-225dc013ef4b","Type":"ContainerStarted","Data":"3534eec3c9fc76963b07a0b5219b6a9845e1a1e397fd9894d4463042335e699a"} Nov 22 04:49:04 crc kubenswrapper[4948]: I1122 04:49:04.997794 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.001710 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" event={"ID":"be4dab2f-0e4f-464f-8a71-5fe7baa561a5","Type":"ContainerStarted","Data":"059fdce448c8487de7163a0ff289a037498f3f9752063ceb11012a4804cb7960"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.001743 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" event={"ID":"be4dab2f-0e4f-464f-8a71-5fe7baa561a5","Type":"ContainerStarted","Data":"b8fa69bfec18d6ead6876d39067f1ffef441829ea794caed6213d9b55292f86c"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.004859 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" podStartSLOduration=125.004838296 podStartE2EDuration="2m5.004838296s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.003548899 +0000 UTC m=+147.689559415" watchObservedRunningTime="2025-11-22 04:49:05.004838296 +0000 UTC m=+147.690848812" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.019141 4948 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-4hf57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" start-of-body= Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.019185 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" podUID="eff606a7-5cf1-461f-a63f-225dc013ef4b" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.026386 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" podStartSLOduration=125.026366652 podStartE2EDuration="2m5.026366652s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.026299921 +0000 UTC m=+147.712310437" watchObservedRunningTime="2025-11-22 04:49:05.026366652 +0000 UTC m=+147.712377168" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.029546 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" event={"ID":"00104340-651c-488e-ae76-53e040f58218","Type":"ContainerStarted","Data":"05f2020dc3f16bd8c3e1814f0bcba237b0670104da213a9672519fc76f806abe"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.040069 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.041142 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.541127578 +0000 UTC m=+148.227138084 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.046659 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" event={"ID":"104266b0-fe3d-4831-8853-e0f964dee743","Type":"ContainerStarted","Data":"824039a5565f777d982a0bac6df9759f4b76cb9d7cea3c6291d08fe4db0b4155"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.053789 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" event={"ID":"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d","Type":"ContainerStarted","Data":"d2e5623449d2d9392aedbe45dbca78c9ee4b34dbe96406093cdee1b049832d62"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.065841 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b5v6z" podStartSLOduration=126.065456134 podStartE2EDuration="2m6.065456134s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.062527362 +0000 UTC m=+147.748537898" watchObservedRunningTime="2025-11-22 04:49:05.065456134 +0000 UTC m=+147.751466650" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.068957 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-lpvjc" event={"ID":"c74bf710-a8fa-4d55-9f68-771f56c145f7","Type":"ContainerStarted","Data":"1ac3497c0f3cecc894e5adbbf3bf3ecca141271a2f16c9c7efcd3c2279ab13c4"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.069529 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-lpvjc" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.078443 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" event={"ID":"fdc54e28-5b97-48c7-824a-3ed65296e98e","Type":"ContainerStarted","Data":"a4e7c14c87100d87480fe995d1cd57762d543b48092d44501cce8d09930a7aaa"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.078714 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-lpvjc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.078754 4948 
prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-lpvjc" podUID="c74bf710-a8fa-4d55-9f68-771f56c145f7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.079989 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" event={"ID":"2e426879-bc45-4bad-8353-fe9be602ffb2","Type":"ContainerStarted","Data":"05ebcc9baf56f9a4c61e6c395287ec2570316670e45925094d7d3e78e601f686"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.082354 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" event={"ID":"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c","Type":"ContainerStarted","Data":"0399a8e5af079ae91e6ad0017e2bfbca4a35761426ff91e0a6699ac596744b44"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.082390 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" event={"ID":"33fef7f7-8e0c-475b-9e5b-8b3eab7b609c","Type":"ContainerStarted","Data":"7682bf4d81206cd8a209dd5554fed0f1e63c084bb7f6532de43bc660c65812c7"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.097887 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" event={"ID":"f0c31760-2506-4e99-b6ee-b072b77d60f7","Type":"ContainerStarted","Data":"b3479bc2daad0bb858638cfd879f64becf9505c66cabebf667e7a07b8fc6f189"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.106051 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-lpvjc" podStartSLOduration=126.106036268 podStartE2EDuration="2m6.106036268s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.105329428 +0000 UTC m=+147.791339944" watchObservedRunningTime="2025-11-22 04:49:05.106036268 +0000 UTC m=+147.792046784" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.107157 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" event={"ID":"c7226f25-d408-493a-a5bd-634480a933ad","Type":"ContainerStarted","Data":"54fa0c39e0d4c3a7938767bc93586e5ad218a9a68e8a4ce3a20e222833a0be5b"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.107824 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6rhjl" podStartSLOduration=125.107817448 podStartE2EDuration="2m5.107817448s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.086721374 +0000 UTC m=+147.772731890" watchObservedRunningTime="2025-11-22 04:49:05.107817448 +0000 UTC m=+147.793827964" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.113508 4948 generic.go:334] "Generic (PLEG): container finished" podID="1682ca74-e62f-492c-8de6-fa0fc27c5b39" containerID="e480ce1780f880b70ba34c264e182e744367b4d518cf06d3412093c6176ea004" exitCode=0 
Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.113571 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" event={"ID":"1682ca74-e62f-492c-8de6-fa0fc27c5b39","Type":"ContainerDied","Data":"e480ce1780f880b70ba34c264e182e744367b4d518cf06d3412093c6176ea004"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.124476 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh" event={"ID":"19e41764-1439-4a74-a6ce-ffacb3448577","Type":"ContainerStarted","Data":"7a5f9a1cb5adedb1b727b1250441215af15afe72974719988e13857ca86f0640"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.128371 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-6qmls" podStartSLOduration=125.128357117 podStartE2EDuration="2m5.128357117s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.127374049 +0000 UTC m=+147.813384565" watchObservedRunningTime="2025-11-22 04:49:05.128357117 +0000 UTC m=+147.814367633" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.132642 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-h8wxv" event={"ID":"ae58b080-edff-4582-839f-fc67d5b0b981","Type":"ContainerStarted","Data":"9884f9809faa9fa71d8083cb6a53af1f9e0fd2d819122f955c7a3968d0e53393"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.134670 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-22sb9" event={"ID":"dd9b5929-4c7a-4598-922b-a5a37c9f4443","Type":"ContainerStarted","Data":"014fc27b005f3891ce429ee78aca4b07b5d66b38375b9e607fdd6082a620800c"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.139266 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" event={"ID":"09733b67-9323-460c-ab8e-e55fbaf31542","Type":"ContainerStarted","Data":"62d7c1240a590858aa9ef1ec8c396fcd7767f994d656bc2d9cb97b2d1fc4f319"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.139300 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" event={"ID":"09733b67-9323-460c-ab8e-e55fbaf31542","Type":"ContainerStarted","Data":"c641ad478ac0e002db46812adc0e05ac563fe638b1c92111f4de842d056d60c0"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.142351 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.142936 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.642918868 +0000 UTC m=+148.328929374 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.147827 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" event={"ID":"17cb3856-2d7a-49ba-8488-1b15832b26a3","Type":"ContainerStarted","Data":"0f83ca0faef06e321d2bfc5526f2374a88132561ce818a7719dc58e360401a68"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.152297 4948 generic.go:334] "Generic (PLEG): container finished" podID="0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e" containerID="707612acb7e30891bdf7feba53715684ec0aef8d475be12d3a7e7bc47568ecec" exitCode=0 Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.152382 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" event={"ID":"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e","Type":"ContainerDied","Data":"707612acb7e30891bdf7feba53715684ec0aef8d475be12d3a7e7bc47568ecec"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.170747 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" event={"ID":"b5f1f7f2-3a3e-464d-84f7-69e726b785a7","Type":"ContainerStarted","Data":"ff28f1a094c4b7c6fb5d534a788bd4ee3482873b2dcf27ef652a4ffaf8464971"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.175503 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" event={"ID":"8c6c1aea-8440-4112-ac10-b638520ec37f","Type":"ContainerStarted","Data":"bb20bfed84853ae9e886635f07fa7716a58bc4ac686554278753837dc55efa2a"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.177311 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" event={"ID":"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248","Type":"ContainerStarted","Data":"01aa44c358e2f33040224d0d6bae77159258806495800fcec3491550879c8419"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.177346 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" event={"ID":"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248","Type":"ContainerStarted","Data":"de28b98983661087bd61a716bfa174c76a923131f1563f194fabc313ef15398f"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.178412 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ljtvf" event={"ID":"c7880b3d-8bca-4ad5-a645-0d043cc71538","Type":"ContainerStarted","Data":"e0a48a5c6267eedba0432dc30b03c3fa74d865a6e47f6ddca0ff8ef9d967e23d"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.179908 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" event={"ID":"6bd32efc-1948-4c04-bd33-eafa2ea7417c","Type":"ContainerStarted","Data":"aad6c91cd222d1b7198b9c6ddf2a489520fcbbd6b4794bbadd9d471564ad1875"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.181297 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" event={"ID":"c09cae21-e7d6-4726-8cc3-44d49ddcf202","Type":"ContainerStarted","Data":"e959531cf55ee84ed8bf5f105f728e4e6c8ba599a52b184c4e7e04904041000a"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.202992 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-zjh6q" podStartSLOduration=125.20296873 podStartE2EDuration="2m5.20296873s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.1795538 +0000 UTC m=+147.865564316" watchObservedRunningTime="2025-11-22 04:49:05.20296873 +0000 UTC m=+147.888979246" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.212786 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kmbfh" event={"ID":"3ce40099-332b-49b0-8eee-914df6a6a572","Type":"ContainerStarted","Data":"53a7e548ef38fbae2ffe2e9383d49cf2fec57589ab0742de4520d2417ee5bb37"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.228692 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.229549 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-h4z45" podStartSLOduration=125.229533659 podStartE2EDuration="2m5.229533659s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.227873932 +0000 UTC m=+147.913884448" watchObservedRunningTime="2025-11-22 04:49:05.229533659 +0000 UTC m=+147.915544165" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.230072 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-c6tw5" podStartSLOduration=125.230066764 podStartE2EDuration="2m5.230066764s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.208078824 +0000 UTC m=+147.894089340" watchObservedRunningTime="2025-11-22 04:49:05.230066764 +0000 UTC m=+147.916077280" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.233757 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.233813 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.234182 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" 
event={"ID":"9f863054-d2f5-4952-ab56-026b3a2bf341","Type":"ContainerStarted","Data":"0134bf146ccaf3803cbefe662cf1779008acb89afb63f34262c14e896dcee4dc"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.235492 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" event={"ID":"89163a77-fa00-44c4-aa83-969d24886d65","Type":"ContainerStarted","Data":"18fcff98958adac0df8a9e7cb098fbcdf6bb360257d04e9952c644b763d331df"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.238941 4948 generic.go:334] "Generic (PLEG): container finished" podID="005ccf51-a81d-4fa0-b9ac-468732a14edf" containerID="a978b19bcfeb496ddfbd5af477500a1e613c560f2d31492d436e8c9d19b6e7e2" exitCode=0 Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.238997 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" event={"ID":"005ccf51-a81d-4fa0-b9ac-468732a14edf","Type":"ContainerDied","Data":"a978b19bcfeb496ddfbd5af477500a1e613c560f2d31492d436e8c9d19b6e7e2"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.240894 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-qrscw" event={"ID":"f69db977-2a9e-4f6b-8bf9-1abfd73b373f","Type":"ContainerStarted","Data":"b930c678bc6366b11d997453bd9d45d25c35b685251a3d2153b69edd719949b5"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.242556 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" event={"ID":"aaf64a47-9260-4ab8-83da-238e80d4965b","Type":"ContainerStarted","Data":"d382ae16834140974abe93b2a51f058f31064cc21c0dce498c940e58c3895666"} Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.243610 4948 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-l7pw8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.243651 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" podUID="45a847ab-7976-4dbe-9ccc-5c89490b7c52" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.246051 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.246291 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.74626845 +0000 UTC m=+148.432279016 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.246496 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.248286 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.748274637 +0000 UTC m=+148.434285153 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.250009 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-h8wxv" podStartSLOduration=125.249995865 podStartE2EDuration="2m5.249995865s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.245341434 +0000 UTC m=+147.931351950" watchObservedRunningTime="2025-11-22 04:49:05.249995865 +0000 UTC m=+147.936006391" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.255630 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-nfpm6" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.257411 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.263891 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7czmn" podStartSLOduration=125.263871417 podStartE2EDuration="2m5.263871417s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.258551347 +0000 UTC m=+147.944561863" watchObservedRunningTime="2025-11-22 04:49:05.263871417 +0000 UTC m=+147.949881953" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.292332 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-n4hqv" 
podStartSLOduration=125.292317278 podStartE2EDuration="2m5.292317278s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.290098776 +0000 UTC m=+147.976109292" watchObservedRunningTime="2025-11-22 04:49:05.292317278 +0000 UTC m=+147.978327794" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.304427 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.308283 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-22sb9" podStartSLOduration=6.308271678 podStartE2EDuration="6.308271678s" podCreationTimestamp="2025-11-22 04:48:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.307199818 +0000 UTC m=+147.993210334" watchObservedRunningTime="2025-11-22 04:49:05.308271678 +0000 UTC m=+147.994282194" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.333772 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-mgqq5" podStartSLOduration=125.333754516 podStartE2EDuration="2m5.333754516s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.331606526 +0000 UTC m=+148.017617042" watchObservedRunningTime="2025-11-22 04:49:05.333754516 +0000 UTC m=+148.019765052" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.347581 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.347736 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.84771188 +0000 UTC m=+148.533722396 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.348162 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.354265 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.854247974 +0000 UTC m=+148.540258490 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.452079 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.452412 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:05.95239695 +0000 UTC m=+148.638407456 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.555135 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.555437 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.055426794 +0000 UTC m=+148.741437310 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.587127 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" podStartSLOduration=125.587111307 podStartE2EDuration="2m5.587111307s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.523741521 +0000 UTC m=+148.209752037" watchObservedRunningTime="2025-11-22 04:49:05.587111307 +0000 UTC m=+148.273121823" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.654235 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-kmbfh" podStartSLOduration=126.654218529 podStartE2EDuration="2m6.654218529s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:05.649813404 +0000 UTC m=+148.335823920" watchObservedRunningTime="2025-11-22 04:49:05.654218529 +0000 UTC m=+148.340229035" Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.657161 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.657655 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.157636715 +0000 UTC m=+148.843647231 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.765393 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.770752 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.270731783 +0000 UTC m=+148.956742309 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.867204 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.867563 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.367547641 +0000 UTC m=+149.053558157 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:05 crc kubenswrapper[4948]: I1122 04:49:05.968714 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:05 crc kubenswrapper[4948]: E1122 04:49:05.969044 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.469033262 +0000 UTC m=+149.155043778 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.069644 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.069846 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.569815242 +0000 UTC m=+149.255825758 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.070088 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.070407 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.570398289 +0000 UTC m=+149.256408885 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.171517 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.171788 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.671774226 +0000 UTC m=+149.357784742 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.231001 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.231059 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.249564 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" event={"ID":"2e426879-bc45-4bad-8353-fe9be602ffb2","Type":"ContainerStarted","Data":"b909f31557a78db57195393715ba40520b5008f9fb894deac30ba6ffa97a76f2"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.249721 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.250652 4948 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-s4wrk container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.250700 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" podUID="2e426879-bc45-4bad-8353-fe9be602ffb2" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.251955 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" event={"ID":"c2fa87ce-b218-4c18-96cb-1fe0b7e5a248","Type":"ContainerStarted","Data":"8584a2e0a639d88113bb492218292dbeb6c391c5de3013ddd6ab18b157a9c433"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.253520 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" event={"ID":"005ccf51-a81d-4fa0-b9ac-468732a14edf","Type":"ContainerStarted","Data":"e2ca30929a9c139f5b72a990cc7bfdaf75fa8e6fd9b8ad9be064069cde318e4c"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.255190 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" event={"ID":"6bd32efc-1948-4c04-bd33-eafa2ea7417c","Type":"ContainerStarted","Data":"1c4a516064df6ee530d20df46a3d81883bcee228e52590ead3dbcbce189b5074"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 
04:49:06.255213 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" event={"ID":"6bd32efc-1948-4c04-bd33-eafa2ea7417c","Type":"ContainerStarted","Data":"671dae5b49d139b0d1fb72f0a80ae3fe1c009b4e353db9ace368ec21e99f4780"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.256620 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" event={"ID":"104266b0-fe3d-4831-8853-e0f964dee743","Type":"ContainerStarted","Data":"8ebccd549a7a08dc937131328b6fc8306209d198907f3131e41d7d73daca1535"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.257650 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" event={"ID":"a31ec4c4-1be7-4631-9611-3886eecd09fb","Type":"ContainerStarted","Data":"b1edbd8476168729b72eb6a004d8c069d0a4d2aabfc5b8eee65c4146c4c4d35f"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.257992 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.259307 4948 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-c7vs6 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" start-of-body= Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.259337 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" podUID="a31ec4c4-1be7-4631-9611-3886eecd09fb" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.259389 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-qrscw" event={"ID":"f69db977-2a9e-4f6b-8bf9-1abfd73b373f","Type":"ContainerStarted","Data":"551ac44224c4ba4be49e2aef7fcb4f95dfc99861647e15db0d32148c66eac528"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.260665 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ljtvf" event={"ID":"c7880b3d-8bca-4ad5-a645-0d043cc71538","Type":"ContainerStarted","Data":"580340a379e7a620e49ba2806cab8b0836d70b869c32a7ed4c5a8c5cbc61f2d2"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.267875 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" event={"ID":"fdc54e28-5b97-48c7-824a-3ed65296e98e","Type":"ContainerStarted","Data":"517041c821ec3bf0e562b5091db6b8b406f028f65374f4e0a4fed8c402ef1ec4"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.273980 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.274366 4948 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.774347397 +0000 UTC m=+149.460357953 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.275124 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" event={"ID":"1682ca74-e62f-492c-8de6-fa0fc27c5b39","Type":"ContainerStarted","Data":"eb018c48ffa282f1adcf1d497c030ca4852ae9bb3a170a62570b85310b1075ec"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.276812 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" event={"ID":"0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e","Type":"ContainerStarted","Data":"b2ba3a8bb77d8298aa71242a2cc7c051b4ccf6f37d80d08b984440021c96a023"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.277163 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.279394 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" event={"ID":"00104340-651c-488e-ae76-53e040f58218","Type":"ContainerStarted","Data":"cc44f157b9beba9f44402d824b90a6d64724f3b0730dd9c0139266bda29e6b11"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.279417 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" event={"ID":"00104340-651c-488e-ae76-53e040f58218","Type":"ContainerStarted","Data":"6693abbd1ae83e59c2f825eb157519577fc969cb6c2e435d0f23c46f3303c9ae"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.282977 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" event={"ID":"9f863054-d2f5-4952-ab56-026b3a2bf341","Type":"ContainerStarted","Data":"99d52de4c42a8acf23c6c369df79c188a37d7591a2c7162efc6166fb2463faed"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.284792 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" event={"ID":"17cb3856-2d7a-49ba-8488-1b15832b26a3","Type":"ContainerStarted","Data":"dcae5a1ef65243d42c8aa9bb39a7384945de0813973bb5335ba12a6e0b18589a"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.284813 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" event={"ID":"17cb3856-2d7a-49ba-8488-1b15832b26a3","Type":"ContainerStarted","Data":"93450dc4328ea4c54b60e74f532b1878d8fda6ea4a45cbb46365a4a58f96094d"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.288327 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" 
event={"ID":"89163a77-fa00-44c4-aa83-969d24886d65","Type":"ContainerStarted","Data":"7ff744f5c943966bc9ecb6ea0b954b8bed13df258fe3a4bc1baf19558ec53130"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.293074 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-5tvbr" podStartSLOduration=127.293061155 podStartE2EDuration="2m7.293061155s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.292817478 +0000 UTC m=+148.978827994" watchObservedRunningTime="2025-11-22 04:49:06.293061155 +0000 UTC m=+148.979071661" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.293913 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh" event={"ID":"19e41764-1439-4a74-a6ce-ffacb3448577","Type":"ContainerStarted","Data":"28383c5fe41862974ee85ccdf491591c2d4ef3fe7d8322d9126a8df1800426c5"} Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.294643 4948 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-4hf57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" start-of-body= Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.294684 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" podUID="eff606a7-5cf1-461f-a63f-225dc013ef4b" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.294837 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.295259 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-lpvjc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.295296 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-lpvjc" podUID="c74bf710-a8fa-4d55-9f68-771f56c145f7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.301141 4948 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bcq8w container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.301195 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" podUID="aaf64a47-9260-4ab8-83da-238e80d4965b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 
04:49:06.303519 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" podStartSLOduration=126.303503629 podStartE2EDuration="2m6.303503629s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.268625076 +0000 UTC m=+148.954635612" watchObservedRunningTime="2025-11-22 04:49:06.303503629 +0000 UTC m=+148.989514145" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.345281 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" podStartSLOduration=126.345245666 podStartE2EDuration="2m6.345245666s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.32481111 +0000 UTC m=+149.010821626" watchObservedRunningTime="2025-11-22 04:49:06.345245666 +0000 UTC m=+149.031256182" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.345432 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-w8qrc" podStartSLOduration=127.345427591 podStartE2EDuration="2m7.345427591s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.342509558 +0000 UTC m=+149.028520074" watchObservedRunningTime="2025-11-22 04:49:06.345427591 +0000 UTC m=+149.031438107" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.375018 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.376917 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.876892578 +0000 UTC m=+149.562903094 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.380524 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-ljtvf" podStartSLOduration=7.380506529 podStartE2EDuration="7.380506529s" podCreationTimestamp="2025-11-22 04:48:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.375126168 +0000 UTC m=+149.061136684" watchObservedRunningTime="2025-11-22 04:49:06.380506529 +0000 UTC m=+149.066517045" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.477162 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.477619 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-7678m" podStartSLOduration=126.477588586 podStartE2EDuration="2m6.477588586s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.47632075 +0000 UTC m=+149.162331266" watchObservedRunningTime="2025-11-22 04:49:06.477588586 +0000 UTC m=+149.163599102" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.479925 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" podStartSLOduration=126.479915381 podStartE2EDuration="2m6.479915381s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.422617606 +0000 UTC m=+149.108628122" watchObservedRunningTime="2025-11-22 04:49:06.479915381 +0000 UTC m=+149.165925897" Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.477668 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:06.977649167 +0000 UTC m=+149.663659683 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.552857 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gld76" podStartSLOduration=126.552827616 podStartE2EDuration="2m6.552827616s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.544486941 +0000 UTC m=+149.230497477" watchObservedRunningTime="2025-11-22 04:49:06.552827616 +0000 UTC m=+149.238838132" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.584579 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.584888 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.587093 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.587254 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.087232776 +0000 UTC m=+149.773243292 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.587426 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.587832 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.087825033 +0000 UTC m=+149.773835549 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.589622 4948 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-xlnzv container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.589713 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" podUID="005ccf51-a81d-4fa0-b9ac-468732a14edf" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.590769 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78blh" podStartSLOduration=126.590752085 podStartE2EDuration="2m6.590752085s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.589536101 +0000 UTC m=+149.275546617" watchObservedRunningTime="2025-11-22 04:49:06.590752085 +0000 UTC m=+149.276762601" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.689157 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.689289 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.189269372 +0000 UTC m=+149.875279888 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.689401 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.689725 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.189718475 +0000 UTC m=+149.875728991 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.693754 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w7xl7" podStartSLOduration=126.693737088 podStartE2EDuration="2m6.693737088s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.631110093 +0000 UTC m=+149.317120609" watchObservedRunningTime="2025-11-22 04:49:06.693737088 +0000 UTC m=+149.379747604" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.723665 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" podStartSLOduration=127.723639501 podStartE2EDuration="2m7.723639501s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.694764767 +0000 UTC m=+149.380775283" watchObservedRunningTime="2025-11-22 04:49:06.723639501 +0000 UTC m=+149.409650017" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.767857 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-vgmn6" podStartSLOduration=126.767840087 podStartE2EDuration="2m6.767840087s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.765662695 +0000 UTC m=+149.451673211" watchObservedRunningTime="2025-11-22 04:49:06.767840087 +0000 UTC m=+149.453850593" Nov 22 04:49:06 crc kubenswrapper[4948]: 
I1122 04:49:06.768901 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-t9v4g" podStartSLOduration=126.768896366 podStartE2EDuration="2m6.768896366s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:06.723565329 +0000 UTC m=+149.409575845" watchObservedRunningTime="2025-11-22 04:49:06.768896366 +0000 UTC m=+149.454906882" Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.790779 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.790955 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.290929827 +0000 UTC m=+149.976940343 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.791070 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.791393 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.29138043 +0000 UTC m=+149.977390946 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.892160 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.892372 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.392340016 +0000 UTC m=+150.078350542 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.892582 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.892967 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.392955463 +0000 UTC m=+150.078965969 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.994320 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.994479 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.494441313 +0000 UTC m=+150.180451819 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:06 crc kubenswrapper[4948]: I1122 04:49:06.994756 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:06 crc kubenswrapper[4948]: E1122 04:49:06.995055 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.49504784 +0000 UTC m=+150.181058356 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.095893 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.096090 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.596053487 +0000 UTC m=+150.282064003 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.096291 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.096602 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.596589402 +0000 UTC m=+150.282599918 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.197039 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.197175 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.697151737 +0000 UTC m=+150.383162253 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.197270 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.197536 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.697524607 +0000 UTC m=+150.383535123 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.234661 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:07 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:07 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:07 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.234722 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.298857 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.298971 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.798949946 +0000 UTC m=+150.484960462 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.299380 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.299632 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.799624875 +0000 UTC m=+150.485635391 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.301635 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" event={"ID":"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d","Type":"ContainerStarted","Data":"bdafa53229228281108e08aa0b74897ac6f78fbb785ea34464e35837c14594aa"} Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.303040 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-qrscw" event={"ID":"f69db977-2a9e-4f6b-8bf9-1abfd73b373f","Type":"ContainerStarted","Data":"95c7da355cbd884360400dca9f17a7b0a7ab01d6468192c529bf8e63845ca5b8"} Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.303106 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-qrscw" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.305165 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" event={"ID":"1682ca74-e62f-492c-8de6-fa0fc27c5b39","Type":"ContainerStarted","Data":"124bc96e144c27ffe8e7f9881b39095aea4eefe02bf6e71bfe859bc31381193a"} Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.306019 4948 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bcq8w container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.306930 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" podUID="aaf64a47-9260-4ab8-83da-238e80d4965b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.306273 4948 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-s4wrk container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.307152 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" podUID="2e426879-bc45-4bad-8353-fe9be602ffb2" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.306371 4948 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-4hf57 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" start-of-body= Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.307195 4948 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" podUID="eff606a7-5cf1-461f-a63f-225dc013ef4b" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.306625 4948 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-c7vs6 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" start-of-body= Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.307217 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" podUID="a31ec4c4-1be7-4631-9611-3886eecd09fb" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.307892 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-lpvjc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.308185 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-lpvjc" podUID="c74bf710-a8fa-4d55-9f68-771f56c145f7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.324163 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-qrscw" podStartSLOduration=8.324148446 podStartE2EDuration="8.324148446s" podCreationTimestamp="2025-11-22 04:48:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:07.320335679 +0000 UTC m=+150.006346195" watchObservedRunningTime="2025-11-22 04:49:07.324148446 +0000 UTC m=+150.010158962" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.400993 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.401141 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.901114726 +0000 UTC m=+150.587125242 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.403894 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.404265 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:07.904251514 +0000 UTC m=+150.590262100 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.411072 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" podStartSLOduration=127.411055386 podStartE2EDuration="2m7.411055386s" podCreationTimestamp="2025-11-22 04:47:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:07.408289718 +0000 UTC m=+150.094300234" watchObservedRunningTime="2025-11-22 04:49:07.411055386 +0000 UTC m=+150.097065902" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.432401 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" podStartSLOduration=128.432381987 podStartE2EDuration="2m8.432381987s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:07.431718468 +0000 UTC m=+150.117728984" watchObservedRunningTime="2025-11-22 04:49:07.432381987 +0000 UTC m=+150.118392503" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.506253 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.507840 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.007824073 +0000 UTC m=+150.693834589 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.608039 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.608125 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.608420 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.108408978 +0000 UTC m=+150.794419484 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.631507 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.709459 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.709720 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.209690803 +0000 UTC m=+150.895701319 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.709980 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.710073 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.710155 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.710277 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.710457 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.210439134 +0000 UTC m=+150.896449650 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.715441 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.715741 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.716974 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.811621 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.811758 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.311732649 +0000 UTC m=+150.997743165 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.812236 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.812568 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.312556562 +0000 UTC m=+150.998567078 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.913090 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:07 crc kubenswrapper[4948]: E1122 04:49:07.913541 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.413512988 +0000 UTC m=+151.099523504 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.978716 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 04:49:07 crc kubenswrapper[4948]: I1122 04:49:07.991416 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.001508 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.014834 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.015159 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.515144591 +0000 UTC m=+151.201155107 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.115823 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.116302 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.616286962 +0000 UTC m=+151.302297478 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.217841 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.218144 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.718131353 +0000 UTC m=+151.404141869 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.233641 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:08 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:08 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:08 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.233698 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.312330 4948 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-jmcp4 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.312381 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" podUID="0d4ce85e-38be-44a4-b4cd-d7c8ba6a109e" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.318956 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.319193 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.81915977 +0000 UTC m=+151.505170306 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.319264 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.319584 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.819570702 +0000 UTC m=+151.505581218 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.420216 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.420703 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.920662621 +0000 UTC m=+151.606673137 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.421958 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.424118 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:08.924100658 +0000 UTC m=+151.610111264 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.527864 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.528027 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.028002467 +0000 UTC m=+151.714012983 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.528092 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.528393 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.028381467 +0000 UTC m=+151.714391983 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.631795 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.632221 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.132149362 +0000 UTC m=+151.818159878 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.738189 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.739020 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.239005794 +0000 UTC m=+151.925016310 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.839166 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.839535 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.339518397 +0000 UTC m=+152.025528913 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:08 crc kubenswrapper[4948]: I1122 04:49:08.940693 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:08 crc kubenswrapper[4948]: E1122 04:49:08.941052 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.441041338 +0000 UTC m=+152.127051854 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.041943 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.042338 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.542306372 +0000 UTC m=+152.228316888 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.143572 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.143942 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.643925077 +0000 UTC m=+152.329935593 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.232103 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:09 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:09 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:09 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.232160 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.244594 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.244765 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.744740278 +0000 UTC m=+152.430750794 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.244895 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.245250 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.745241702 +0000 UTC m=+152.431252218 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.317714 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"c18c2c39d9aa589a4d19396843ef3a34479641d9ce5d12681e258f7bfbe7363b"} Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.317790 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"6eaf95f24560f55e10d791f1f9f56dc3ac0e0f8ed659e9825f7fd06243ee01ce"} Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.320161 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"e3ea7209abb64c2d6e77d77d96bb30d3e5e96079f6daf218fc4d9c827980b7cc"} Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.320212 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"466759b5e218f13706d7b6e58ce5186525d17fa093b4c732f9441e387ef433a1"} Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.321717 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"0ad3e3a18da480394efe82270cd650639d958f5446b307e40ffbb4d91bc7e5f4"} Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.321740 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"5e16c2263252fa2e6aa2fa81be79709437fb0542ad6a7fb0ba2dacf36ebfa826"} Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.322172 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.346063 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.346231 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.846212958 +0000 UTC m=+152.532223474 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.346269 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.346619 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.84661256 +0000 UTC m=+152.532623076 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.446806 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.447063 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.94703311 +0000 UTC m=+152.633043626 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.447179 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.448230 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:09.948210973 +0000 UTC m=+152.634221569 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.548253 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.548538 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.0485222 +0000 UTC m=+152.734532716 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.649537 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.649891 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.149874797 +0000 UTC m=+152.835885313 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.750030 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.750243 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.250221825 +0000 UTC m=+152.936232341 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.750347 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.750619 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.250609676 +0000 UTC m=+152.936620192 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.861970 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.862132 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.362109719 +0000 UTC m=+153.048120235 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.862334 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.862679 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.362670945 +0000 UTC m=+153.048681461 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.867852 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.868501 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.871719 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.871907 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.916330 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.963097 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.963285 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.46326539 +0000 UTC m=+153.149275906 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.963344 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.963414 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79db20f8-8a84-466f-9e2d-449a0643a469-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"79db20f8-8a84-466f-9e2d-449a0643a469\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:09 crc kubenswrapper[4948]: I1122 04:49:09.963479 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79db20f8-8a84-466f-9e2d-449a0643a469-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"79db20f8-8a84-466f-9e2d-449a0643a469\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:09 crc kubenswrapper[4948]: E1122 04:49:09.963758 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.463742904 +0000 UTC m=+153.149753410 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.064361 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.064534 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79db20f8-8a84-466f-9e2d-449a0643a469-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"79db20f8-8a84-466f-9e2d-449a0643a469\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.064581 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.564554875 +0000 UTC m=+153.250565391 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.064627 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79db20f8-8a84-466f-9e2d-449a0643a469-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"79db20f8-8a84-466f-9e2d-449a0643a469\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.064639 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.064718 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79db20f8-8a84-466f-9e2d-449a0643a469-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"79db20f8-8a84-466f-9e2d-449a0643a469\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.064908 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.564900245 +0000 UTC m=+153.250910761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.089077 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79db20f8-8a84-466f-9e2d-449a0643a469-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"79db20f8-8a84-466f-9e2d-449a0643a469\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.165386 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.165615 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.665582973 +0000 UTC m=+153.351593499 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.184583 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.231810 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:10 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:10 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:10 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.231868 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.266134 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.266658 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.766638911 +0000 UTC m=+153.452649477 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.367945 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.368181 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.868156342 +0000 UTC m=+153.554166858 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.369131 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.369590 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.869575192 +0000 UTC m=+153.555585708 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.383412 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fmtjw"] Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.385999 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.387018 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fmtjw"] Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.392408 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.473926 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.474133 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rccdm\" (UniqueName: \"kubernetes.io/projected/be825ff5-e561-447a-b1d2-1676b8577454-kube-api-access-rccdm\") pod \"certified-operators-fmtjw\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") " pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.474195 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-catalog-content\") pod \"certified-operators-fmtjw\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") " pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.474235 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-utilities\") pod \"certified-operators-fmtjw\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") " pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.474341 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:10.974327315 +0000 UTC m=+153.660337831 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.492324 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 22 04:49:10 crc kubenswrapper[4948]: W1122 04:49:10.504521 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod79db20f8_8a84_466f_9e2d_449a0643a469.slice/crio-a302765950952806fb9b48287352bbab7de120acfe41e588ce492dad64f49049 WatchSource:0}: Error finding container a302765950952806fb9b48287352bbab7de120acfe41e588ce492dad64f49049: Status 404 returned error can't find the container with id a302765950952806fb9b48287352bbab7de120acfe41e588ce492dad64f49049 Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.562331 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lrv8b"] Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.564263 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.567578 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.576965 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-catalog-content\") pod \"community-operators-lrv8b\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") " pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.577020 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-utilities\") pod \"certified-operators-fmtjw\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") " pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.577091 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.577143 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rccdm\" (UniqueName: \"kubernetes.io/projected/be825ff5-e561-447a-b1d2-1676b8577454-kube-api-access-rccdm\") pod \"certified-operators-fmtjw\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") " pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.577173 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-utilities\") pod \"community-operators-lrv8b\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") " pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.577233 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2d94\" (UniqueName: \"kubernetes.io/projected/04278e85-8ed2-4820-adbf-ee745fc13337-kube-api-access-h2d94\") pod \"community-operators-lrv8b\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") " pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.577293 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-catalog-content\") pod \"certified-operators-fmtjw\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") " pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.578039 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:11.078009997 +0000 UTC m=+153.764020673 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.578505 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-catalog-content\") pod \"certified-operators-fmtjw\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") " pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.578571 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lrv8b"] Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.578665 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-utilities\") pod \"certified-operators-fmtjw\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") " pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.623843 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rccdm\" (UniqueName: \"kubernetes.io/projected/be825ff5-e561-447a-b1d2-1676b8577454-kube-api-access-rccdm\") pod \"certified-operators-fmtjw\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") " pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.678299 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.678520 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2d94\" (UniqueName: \"kubernetes.io/projected/04278e85-8ed2-4820-adbf-ee745fc13337-kube-api-access-h2d94\") pod \"community-operators-lrv8b\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") " pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.678601 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-catalog-content\") pod \"community-operators-lrv8b\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") " pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.678675 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-utilities\") pod \"community-operators-lrv8b\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") " pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.679160 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-utilities\") pod \"community-operators-lrv8b\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") " pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.679295 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 04:49:11.179275461 +0000 UTC m=+153.865285987 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.680367 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-catalog-content\") pod \"community-operators-lrv8b\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") " pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.697455 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2d94\" (UniqueName: \"kubernetes.io/projected/04278e85-8ed2-4820-adbf-ee745fc13337-kube-api-access-h2d94\") pod \"community-operators-lrv8b\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") " pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.700240 4948 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.708799 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fmtjw" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.764896 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cs2cm"] Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.765811 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.778628 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cs2cm"] Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.780876 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.780942 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-utilities\") pod \"certified-operators-cs2cm\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.780970 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cpnh\" (UniqueName: \"kubernetes.io/projected/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-kube-api-access-6cpnh\") pod \"certified-operators-cs2cm\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.781011 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-catalog-content\") pod \"certified-operators-cs2cm\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.783247 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:11.283232671 +0000 UTC m=+153.969243187 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.884271 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.884625 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-22 04:49:11.384595428 +0000 UTC m=+154.070605944 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.885415 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cpnh\" (UniqueName: \"kubernetes.io/projected/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-kube-api-access-6cpnh\") pod \"certified-operators-cs2cm\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.885552 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-utilities\") pod \"certified-operators-cs2cm\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.885718 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-catalog-content\") pod \"certified-operators-cs2cm\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.885944 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.886218 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-utilities\") pod \"certified-operators-cs2cm\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: E1122 04:49:10.886510 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 04:49:11.386496662 +0000 UTC m=+154.072507178 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v78bn" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.886552 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-catalog-content\") pod \"certified-operators-cs2cm\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.886663 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.951557 4948 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-22T04:49:10.700284473Z","Handler":null,"Name":""} Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.953230 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cpnh\" (UniqueName: \"kubernetes.io/projected/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-kube-api-access-6cpnh\") pod \"certified-operators-cs2cm\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.968923 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jmcp4" Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.969706 4948 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.969837 4948 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 22 04:49:10 crc kubenswrapper[4948]: I1122 04:49:10.989784 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.006192 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.027314 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fw6ph"] Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.030865 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.045111 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fw6ph"] Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.096305 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-catalog-content\") pod \"community-operators-fw6ph\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") " pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.096675 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.096723 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc94m\" (UniqueName: \"kubernetes.io/projected/f43c81a6-850d-442c-92b2-1c45ab8e32cf-kube-api-access-hc94m\") pod \"community-operators-fw6ph\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") " pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.096748 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-utilities\") pod \"community-operators-fw6ph\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") " pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.108706 4948 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.108753 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.121583 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fmtjw"] Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.160526 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.168719 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v78bn\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.198142 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc94m\" (UniqueName: \"kubernetes.io/projected/f43c81a6-850d-442c-92b2-1c45ab8e32cf-kube-api-access-hc94m\") pod \"community-operators-fw6ph\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") " pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.198215 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-utilities\") pod \"community-operators-fw6ph\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") " pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.198290 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-catalog-content\") pod \"community-operators-fw6ph\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") " pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.198919 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-catalog-content\") pod \"community-operators-fw6ph\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") " pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.199290 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-utilities\") pod \"community-operators-fw6ph\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") " pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.225637 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc94m\" (UniqueName: \"kubernetes.io/projected/f43c81a6-850d-442c-92b2-1c45ab8e32cf-kube-api-access-hc94m\") pod \"community-operators-fw6ph\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") " pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.241856 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:11 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:11 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:11 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.241920 4948 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.313278 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lrv8b"] Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.369299 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.374495 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fw6ph" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.391589 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmtjw" event={"ID":"be825ff5-e561-447a-b1d2-1676b8577454","Type":"ContainerStarted","Data":"d5aa49eb4bd0841e6b29ab0913a610fceec6b9fe2ee31f1c3fd8541b4931e750"} Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.391636 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmtjw" event={"ID":"be825ff5-e561-447a-b1d2-1676b8577454","Type":"ContainerStarted","Data":"577800ad28cec6f0d0d8e23515fbbdfe66c4b5ff2ebfa9236ade21f5e266bda9"} Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.411148 4948 generic.go:334] "Generic (PLEG): container finished" podID="df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4" containerID="e53ea476863cd748f42850c7d2de1e802d68288e91d5082ba3a6a125708c509d" exitCode=0 Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.411260 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" event={"ID":"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4","Type":"ContainerDied","Data":"e53ea476863cd748f42850c7d2de1e802d68288e91d5082ba3a6a125708c509d"} Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.444263 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"79db20f8-8a84-466f-9e2d-449a0643a469","Type":"ContainerStarted","Data":"0fef130396122740fd09260bdc4a0f0c6d22d91acfb1dc69a68a1fded7823570"} Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.444317 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"79db20f8-8a84-466f-9e2d-449a0643a469","Type":"ContainerStarted","Data":"a302765950952806fb9b48287352bbab7de120acfe41e588ce492dad64f49049"} Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.470586 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrv8b" event={"ID":"04278e85-8ed2-4820-adbf-ee745fc13337","Type":"ContainerStarted","Data":"a816d194113fd1d936ab348047e0236370dfe145c8053219737a15a2583ecb90"} Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.505853 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" event={"ID":"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d","Type":"ContainerStarted","Data":"c30e98d644630c84175d70f58843b4f84078f463bb71714ae34112f100541dff"} Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.505903 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" 
event={"ID":"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d","Type":"ContainerStarted","Data":"1d11ed0774801b6903c422d70dfe9695a73d46680b054c14111cfc601b985b95"} Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.505919 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" event={"ID":"fd10d6c8-51cb-438e-91b8-3ddaf3c3733d","Type":"ContainerStarted","Data":"ebc0d14000ef10c059741b26f8ede96cacdb68429c678781768e59da503328ed"} Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.535806 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.535775711 podStartE2EDuration="2.535775711s" podCreationTimestamp="2025-11-22 04:49:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:11.479861476 +0000 UTC m=+154.165871992" watchObservedRunningTime="2025-11-22 04:49:11.535775711 +0000 UTC m=+154.221786227" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.539226 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cs2cm"] Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.543094 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-5wl5s" podStartSLOduration=12.543076367 podStartE2EDuration="12.543076367s" podCreationTimestamp="2025-11-22 04:48:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:11.54107973 +0000 UTC m=+154.227090236" watchObservedRunningTime="2025-11-22 04:49:11.543076367 +0000 UTC m=+154.229086883" Nov 22 04:49:11 crc kubenswrapper[4948]: W1122 04:49:11.555608 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod268b6b3d_a26e_4d86_9ef9_dba7dc5478cd.slice/crio-4d90b56d0cb5a0795771cead4a0bc76e47d0a38d5c7696b089754ade30952424 WatchSource:0}: Error finding container 4d90b56d0cb5a0795771cead4a0bc76e47d0a38d5c7696b089754ade30952424: Status 404 returned error can't find the container with id 4d90b56d0cb5a0795771cead4a0bc76e47d0a38d5c7696b089754ade30952424 Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.588196 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.603635 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xlnzv" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.642504 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.679933 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-lpvjc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.680156 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-lpvjc" podUID="c74bf710-a8fa-4d55-9f68-771f56c145f7" containerName="download-server" 
probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.680001 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-lpvjc container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.680569 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-lpvjc" podUID="c74bf710-a8fa-4d55-9f68-771f56c145f7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.768331 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.949214 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fw6ph"] Nov 22 04:49:11 crc kubenswrapper[4948]: I1122 04:49:11.989109 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v78bn"] Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.046817 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.046877 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.047925 4948 patch_prober.go:28] interesting pod/console-f9d7485db-kmbfh container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.047989 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-kmbfh" podUID="3ce40099-332b-49b0-8eee-914df6a6a572" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.146775 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.146835 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.155161 4948 patch_prober.go:28] interesting pod/apiserver-76f77b778f-xkfvj container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]log ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]etcd ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]poststarthook/generic-apiserver-start-informers ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]poststarthook/max-in-flight-filter ok Nov 22 04:49:12 crc 
kubenswrapper[4948]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 22 04:49:12 crc kubenswrapper[4948]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 22 04:49:12 crc kubenswrapper[4948]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 22 04:49:12 crc kubenswrapper[4948]: [+]poststarthook/project.openshift.io-projectcache ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]poststarthook/openshift.io-startinformers ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 22 04:49:12 crc kubenswrapper[4948]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 22 04:49:12 crc kubenswrapper[4948]: livez check failed Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.155233 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" podUID="1682ca74-e62f-492c-8de6-fa0fc27c5b39" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.214747 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.228291 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.230828 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:12 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:12 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:12 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.230872 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.273338 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4hf57" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.294731 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.362541 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l8rpk"] Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.363833 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.365988 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.381598 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8rpk"] Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.414713 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-utilities\") pod \"redhat-marketplace-l8rpk\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") " pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.415098 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-catalog-content\") pod \"redhat-marketplace-l8rpk\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") " pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.415163 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jf48\" (UniqueName: \"kubernetes.io/projected/74a8814b-851c-4d5f-833b-ca0c87b76f48-kube-api-access-8jf48\") pod \"redhat-marketplace-l8rpk\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") " pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.482256 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-s4wrk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.512566 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" event={"ID":"1d2caf26-e32d-412f-b764-a050f5a5840c","Type":"ContainerStarted","Data":"204b97ddba5473e9a01aee50f11629b64ab734abb719953ab55eba84ff303052"} Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.512611 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" event={"ID":"1d2caf26-e32d-412f-b764-a050f5a5840c","Type":"ContainerStarted","Data":"c25002b7dfc514ef1199051fab7b220d25bd09cde42ba5ee01923d6e5ae36e87"} Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.513326 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.515304 4948 generic.go:334] "Generic (PLEG): container finished" podID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerID="095303b08afab09853115c9997a4dabe121c3d16eac6fbefd393b8426870668f" exitCode=0 Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.515383 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cs2cm" event={"ID":"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd","Type":"ContainerDied","Data":"095303b08afab09853115c9997a4dabe121c3d16eac6fbefd393b8426870668f"} Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.515432 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cs2cm" 
event={"ID":"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd","Type":"ContainerStarted","Data":"4d90b56d0cb5a0795771cead4a0bc76e47d0a38d5c7696b089754ade30952424"} Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.515977 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-utilities\") pod \"redhat-marketplace-l8rpk\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") " pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.516053 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-catalog-content\") pod \"redhat-marketplace-l8rpk\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") " pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.516102 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jf48\" (UniqueName: \"kubernetes.io/projected/74a8814b-851c-4d5f-833b-ca0c87b76f48-kube-api-access-8jf48\") pod \"redhat-marketplace-l8rpk\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") " pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.516342 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-utilities\") pod \"redhat-marketplace-l8rpk\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") " pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.516792 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-catalog-content\") pod \"redhat-marketplace-l8rpk\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") " pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.520117 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.521050 4948 generic.go:334] "Generic (PLEG): container finished" podID="04278e85-8ed2-4820-adbf-ee745fc13337" containerID="5f1a546be0762a2fc27683e4ecaa0004f251d8b3e3c7e0106a7ccc1f8e8ec711" exitCode=0 Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.521229 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrv8b" event={"ID":"04278e85-8ed2-4820-adbf-ee745fc13337","Type":"ContainerDied","Data":"5f1a546be0762a2fc27683e4ecaa0004f251d8b3e3c7e0106a7ccc1f8e8ec711"} Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.525576 4948 generic.go:334] "Generic (PLEG): container finished" podID="be825ff5-e561-447a-b1d2-1676b8577454" containerID="d5aa49eb4bd0841e6b29ab0913a610fceec6b9fe2ee31f1c3fd8541b4931e750" exitCode=0 Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.525662 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmtjw" event={"ID":"be825ff5-e561-447a-b1d2-1676b8577454","Type":"ContainerDied","Data":"d5aa49eb4bd0841e6b29ab0913a610fceec6b9fe2ee31f1c3fd8541b4931e750"} Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.529897 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" podStartSLOduration=133.52987993 podStartE2EDuration="2m13.52987993s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:12.528873762 +0000 UTC m=+155.214884278" watchObservedRunningTime="2025-11-22 04:49:12.52987993 +0000 UTC m=+155.215890446" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.530807 4948 generic.go:334] "Generic (PLEG): container finished" podID="79db20f8-8a84-466f-9e2d-449a0643a469" containerID="0fef130396122740fd09260bdc4a0f0c6d22d91acfb1dc69a68a1fded7823570" exitCode=0 Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.530895 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"79db20f8-8a84-466f-9e2d-449a0643a469","Type":"ContainerDied","Data":"0fef130396122740fd09260bdc4a0f0c6d22d91acfb1dc69a68a1fded7823570"} Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.540105 4948 generic.go:334] "Generic (PLEG): container finished" podID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerID="3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b" exitCode=0 Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.540287 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fw6ph" event={"ID":"f43c81a6-850d-442c-92b2-1c45ab8e32cf","Type":"ContainerDied","Data":"3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b"} Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.540846 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fw6ph" event={"ID":"f43c81a6-850d-442c-92b2-1c45ab8e32cf","Type":"ContainerStarted","Data":"ae057486d21ab643519a2b23047660a82a77939129c0dc8e3ca9f300acf547ec"} Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.548666 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-c7vs6" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.550562 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jf48\" (UniqueName: \"kubernetes.io/projected/74a8814b-851c-4d5f-833b-ca0c87b76f48-kube-api-access-8jf48\") pod \"redhat-marketplace-l8rpk\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") " pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.677965 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l8rpk" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.765412 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-x57l2"] Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.767414 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.793639 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x57l2"] Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.815372 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.818202 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.820894 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phdkz\" (UniqueName: \"kubernetes.io/projected/4ba86786-2cde-4556-949f-f0c07fb471ec-kube-api-access-phdkz\") pod \"redhat-marketplace-x57l2\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.820926 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-catalog-content\") pod \"redhat-marketplace-x57l2\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.820947 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-utilities\") pod \"redhat-marketplace-x57l2\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.821598 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.821768 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.823962 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.844938 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.921969 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksgqs\" (UniqueName: \"kubernetes.io/projected/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-kube-api-access-ksgqs\") pod \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.922140 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-config-volume\") pod \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.922173 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-secret-volume\") pod \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\" (UID: \"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4\") " Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.922454 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.922543 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-catalog-content\") pod \"redhat-marketplace-x57l2\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.922564 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phdkz\" (UniqueName: \"kubernetes.io/projected/4ba86786-2cde-4556-949f-f0c07fb471ec-kube-api-access-phdkz\") pod \"redhat-marketplace-x57l2\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.922587 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-utilities\") pod \"redhat-marketplace-x57l2\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.922620 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.922632 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-config-volume" (OuterVolumeSpecName: "config-volume") pod "df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4" (UID: "df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.922994 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-catalog-content\") pod \"redhat-marketplace-x57l2\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.923846 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-utilities\") pod \"redhat-marketplace-x57l2\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.925620 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-kube-api-access-ksgqs" (OuterVolumeSpecName: "kube-api-access-ksgqs") pod "df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4" (UID: "df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4"). InnerVolumeSpecName "kube-api-access-ksgqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.936558 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4" (UID: "df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:49:12 crc kubenswrapper[4948]: I1122 04:49:12.939012 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phdkz\" (UniqueName: \"kubernetes.io/projected/4ba86786-2cde-4556-949f-f0c07fb471ec-kube-api-access-phdkz\") pod \"redhat-marketplace-x57l2\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.023450 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.023574 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.023652 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.023724 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksgqs\" (UniqueName: \"kubernetes.io/projected/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-kube-api-access-ksgqs\") on node \"crc\" DevicePath \"\"" Nov 22 
04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.023750 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.023760 4948 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.038962 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.099829 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8rpk"] Nov 22 04:49:13 crc kubenswrapper[4948]: W1122 04:49:13.110181 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74a8814b_851c_4d5f_833b_ca0c87b76f48.slice/crio-6d6880097b36956b5a04a11a9ad4475b3edbb78da6c5cd409bcf889d4f372461 WatchSource:0}: Error finding container 6d6880097b36956b5a04a11a9ad4475b3edbb78da6c5cd409bcf889d4f372461: Status 404 returned error can't find the container with id 6d6880097b36956b5a04a11a9ad4475b3edbb78da6c5cd409bcf889d4f372461 Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.130665 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.153857 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.235309 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:13 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:13 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:13 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.235371 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.322563 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x57l2"] Nov 22 04:49:13 crc kubenswrapper[4948]: W1122 04:49:13.338585 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ba86786_2cde_4556_949f_f0c07fb471ec.slice/crio-ea017a8eeba08432ca72872a2c69b9eec72e1e956f24e1fbee777f169c2d431c WatchSource:0}: Error finding container ea017a8eeba08432ca72872a2c69b9eec72e1e956f24e1fbee777f169c2d431c: Status 404 returned error can't find the container with id ea017a8eeba08432ca72872a2c69b9eec72e1e956f24e1fbee777f169c2d431c Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.435697 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.554942 4948 generic.go:334] "Generic (PLEG): container finished" podID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerID="6b20a32128fd78fb2942e9dbec98b174919f15a693113ea4ef315fef40aacea6" exitCode=0 Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.555018 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8rpk" event={"ID":"74a8814b-851c-4d5f-833b-ca0c87b76f48","Type":"ContainerDied","Data":"6b20a32128fd78fb2942e9dbec98b174919f15a693113ea4ef315fef40aacea6"} Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.555075 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8rpk" event={"ID":"74a8814b-851c-4d5f-833b-ca0c87b76f48","Type":"ContainerStarted","Data":"6d6880097b36956b5a04a11a9ad4475b3edbb78da6c5cd409bcf889d4f372461"} Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.558625 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" event={"ID":"df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4","Type":"ContainerDied","Data":"90ac4090590018efd1c496bc331e832e5e8d4938a2712b4e9d8c5561dd82f098"} Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.558664 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90ac4090590018efd1c496bc331e832e5e8d4938a2712b4e9d8c5561dd82f098" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.558732 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396445-9b9j9" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.566841 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ce871a4d-4584-4ed4-9de5-efb3afa1692b","Type":"ContainerStarted","Data":"22e9692dd000842ee51bb0c2b72cb3d0b99ef261cce78e071313868749801e2a"} Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.571403 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x57l2" event={"ID":"4ba86786-2cde-4556-949f-f0c07fb471ec","Type":"ContainerStarted","Data":"47d49a4032a61f2b2e1f97879ebb0503e9cc83e2abd5ca227e9f782c22a7e8ce"} Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.571443 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x57l2" event={"ID":"4ba86786-2cde-4556-949f-f0c07fb471ec","Type":"ContainerStarted","Data":"ea017a8eeba08432ca72872a2c69b9eec72e1e956f24e1fbee777f169c2d431c"} Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.808199 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5lglc"] Nov 22 04:49:13 crc kubenswrapper[4948]: E1122 04:49:13.808950 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4" containerName="collect-profiles" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.808965 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4" containerName="collect-profiles" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.809067 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="df3ebce5-74db-4a9b-a84b-1a48b1b2c5e4" containerName="collect-profiles" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.809759 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5lglc"] Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.809898 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.813059 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.892816 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.955765 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-utilities\") pod \"redhat-operators-5lglc\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") " pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.955839 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-catalog-content\") pod \"redhat-operators-5lglc\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") " pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:13 crc kubenswrapper[4948]: I1122 04:49:13.955866 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4zfc\" (UniqueName: \"kubernetes.io/projected/26a59e1e-ef32-4b87-86ae-eb86aadafcad-kube-api-access-v4zfc\") pod \"redhat-operators-5lglc\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") " pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.056890 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79db20f8-8a84-466f-9e2d-449a0643a469-kube-api-access\") pod \"79db20f8-8a84-466f-9e2d-449a0643a469\" (UID: \"79db20f8-8a84-466f-9e2d-449a0643a469\") " Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.058070 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79db20f8-8a84-466f-9e2d-449a0643a469-kubelet-dir\") pod \"79db20f8-8a84-466f-9e2d-449a0643a469\" (UID: \"79db20f8-8a84-466f-9e2d-449a0643a469\") " Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.058488 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/79db20f8-8a84-466f-9e2d-449a0643a469-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "79db20f8-8a84-466f-9e2d-449a0643a469" (UID: "79db20f8-8a84-466f-9e2d-449a0643a469"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.059430 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4zfc\" (UniqueName: \"kubernetes.io/projected/26a59e1e-ef32-4b87-86ae-eb86aadafcad-kube-api-access-v4zfc\") pod \"redhat-operators-5lglc\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") " pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.059615 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-utilities\") pod \"redhat-operators-5lglc\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") " pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.060115 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-utilities\") pod \"redhat-operators-5lglc\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") " pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.064656 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-catalog-content\") pod \"redhat-operators-5lglc\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") " pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.065298 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-catalog-content\") pod \"redhat-operators-5lglc\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") " pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.065336 4948 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/79db20f8-8a84-466f-9e2d-449a0643a469-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.078438 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79db20f8-8a84-466f-9e2d-449a0643a469-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "79db20f8-8a84-466f-9e2d-449a0643a469" (UID: "79db20f8-8a84-466f-9e2d-449a0643a469"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.079947 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4zfc\" (UniqueName: \"kubernetes.io/projected/26a59e1e-ef32-4b87-86ae-eb86aadafcad-kube-api-access-v4zfc\") pod \"redhat-operators-5lglc\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") " pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.166894 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79db20f8-8a84-466f-9e2d-449a0643a469-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.166992 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9ltfm"] Nov 22 04:49:14 crc kubenswrapper[4948]: E1122 04:49:14.167866 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79db20f8-8a84-466f-9e2d-449a0643a469" containerName="pruner" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.167888 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="79db20f8-8a84-466f-9e2d-449a0643a469" containerName="pruner" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.168031 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="79db20f8-8a84-466f-9e2d-449a0643a469" containerName="pruner" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.169049 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.176360 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9ltfm"] Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.184366 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.234114 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:14 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:14 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:14 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.234189 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.369337 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m22cb\" (UniqueName: \"kubernetes.io/projected/94c90684-8f9b-4922-ad0b-1fa3b77d0767-kube-api-access-m22cb\") pod \"redhat-operators-9ltfm\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") " pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.369387 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-utilities\") pod \"redhat-operators-9ltfm\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") " pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.369458 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-catalog-content\") pod \"redhat-operators-9ltfm\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") " pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.446145 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5lglc"] Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.471323 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-catalog-content\") pod \"redhat-operators-9ltfm\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") " pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.471789 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m22cb\" (UniqueName: \"kubernetes.io/projected/94c90684-8f9b-4922-ad0b-1fa3b77d0767-kube-api-access-m22cb\") pod \"redhat-operators-9ltfm\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") " pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.471973 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-utilities\") pod \"redhat-operators-9ltfm\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") " pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.472394 4948 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-catalog-content\") pod \"redhat-operators-9ltfm\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") " pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.472964 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-utilities\") pod \"redhat-operators-9ltfm\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") " pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.489199 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m22cb\" (UniqueName: \"kubernetes.io/projected/94c90684-8f9b-4922-ad0b-1fa3b77d0767-kube-api-access-m22cb\") pod \"redhat-operators-9ltfm\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") " pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.498768 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.585246 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"79db20f8-8a84-466f-9e2d-449a0643a469","Type":"ContainerDied","Data":"a302765950952806fb9b48287352bbab7de120acfe41e588ce492dad64f49049"} Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.585560 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a302765950952806fb9b48287352bbab7de120acfe41e588ce492dad64f49049" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.585271 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.586857 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ce871a4d-4584-4ed4-9de5-efb3afa1692b","Type":"ContainerStarted","Data":"2b412613b3de2d98211dc9691b595c22a31cef6266405d47722dab6a1d7795d5"} Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.590445 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x57l2" event={"ID":"4ba86786-2cde-4556-949f-f0c07fb471ec","Type":"ContainerDied","Data":"47d49a4032a61f2b2e1f97879ebb0503e9cc83e2abd5ca227e9f782c22a7e8ce"} Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.591208 4948 generic.go:334] "Generic (PLEG): container finished" podID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerID="47d49a4032a61f2b2e1f97879ebb0503e9cc83e2abd5ca227e9f782c22a7e8ce" exitCode=0 Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.592500 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lglc" event={"ID":"26a59e1e-ef32-4b87-86ae-eb86aadafcad","Type":"ContainerStarted","Data":"4cbbda81a4b21aefcb0bb7513cb9426d0f30ba3709c15ead6609bdda9efe3824"} Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.608863 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.608842377 podStartE2EDuration="2.608842377s" podCreationTimestamp="2025-11-22 04:49:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:49:14.599517754 +0000 UTC m=+157.285528270" watchObservedRunningTime="2025-11-22 04:49:14.608842377 +0000 UTC m=+157.294852893" Nov 22 04:49:14 crc kubenswrapper[4948]: I1122 04:49:14.812343 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9ltfm"] Nov 22 04:49:14 crc kubenswrapper[4948]: W1122 04:49:14.839273 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94c90684_8f9b_4922_ad0b_1fa3b77d0767.slice/crio-e3fe38b2a3338591303a53671378168fd0ad0bd688ae0fcbd2684c5e54d4abfc WatchSource:0}: Error finding container e3fe38b2a3338591303a53671378168fd0ad0bd688ae0fcbd2684c5e54d4abfc: Status 404 returned error can't find the container with id e3fe38b2a3338591303a53671378168fd0ad0bd688ae0fcbd2684c5e54d4abfc Nov 22 04:49:15 crc kubenswrapper[4948]: I1122 04:49:15.233926 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:15 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:15 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:15 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:15 crc kubenswrapper[4948]: I1122 04:49:15.233993 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:15 crc kubenswrapper[4948]: I1122 04:49:15.598862 4948 generic.go:334] "Generic (PLEG): container finished" 
podID="ce871a4d-4584-4ed4-9de5-efb3afa1692b" containerID="2b412613b3de2d98211dc9691b595c22a31cef6266405d47722dab6a1d7795d5" exitCode=0 Nov 22 04:49:15 crc kubenswrapper[4948]: I1122 04:49:15.598916 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ce871a4d-4584-4ed4-9de5-efb3afa1692b","Type":"ContainerDied","Data":"2b412613b3de2d98211dc9691b595c22a31cef6266405d47722dab6a1d7795d5"} Nov 22 04:49:15 crc kubenswrapper[4948]: I1122 04:49:15.602716 4948 generic.go:334] "Generic (PLEG): container finished" podID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerID="e93cd15c4fecf7c6fafc565be33c849dd42d75a317f1f28c523f87fa8bb3ffd6" exitCode=0 Nov 22 04:49:15 crc kubenswrapper[4948]: I1122 04:49:15.602768 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ltfm" event={"ID":"94c90684-8f9b-4922-ad0b-1fa3b77d0767","Type":"ContainerDied","Data":"e93cd15c4fecf7c6fafc565be33c849dd42d75a317f1f28c523f87fa8bb3ffd6"} Nov 22 04:49:15 crc kubenswrapper[4948]: I1122 04:49:15.602785 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ltfm" event={"ID":"94c90684-8f9b-4922-ad0b-1fa3b77d0767","Type":"ContainerStarted","Data":"e3fe38b2a3338591303a53671378168fd0ad0bd688ae0fcbd2684c5e54d4abfc"} Nov 22 04:49:15 crc kubenswrapper[4948]: I1122 04:49:15.606061 4948 generic.go:334] "Generic (PLEG): container finished" podID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerID="c458ea9af7e8004240ad8e5eed277cd325ab39584477426f70558e4b4ec85bc8" exitCode=0 Nov 22 04:49:15 crc kubenswrapper[4948]: I1122 04:49:15.606111 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lglc" event={"ID":"26a59e1e-ef32-4b87-86ae-eb86aadafcad","Type":"ContainerDied","Data":"c458ea9af7e8004240ad8e5eed277cd325ab39584477426f70558e4b4ec85bc8"} Nov 22 04:49:16 crc kubenswrapper[4948]: I1122 04:49:16.231346 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:16 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:16 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:16 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:16 crc kubenswrapper[4948]: I1122 04:49:16.231412 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.011035 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.115095 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kubelet-dir\") pod \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\" (UID: \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\") " Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.115230 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kube-api-access\") pod \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\" (UID: \"ce871a4d-4584-4ed4-9de5-efb3afa1692b\") " Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.115366 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ce871a4d-4584-4ed4-9de5-efb3afa1692b" (UID: "ce871a4d-4584-4ed4-9de5-efb3afa1692b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.115474 4948 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.121924 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ce871a4d-4584-4ed4-9de5-efb3afa1692b" (UID: "ce871a4d-4584-4ed4-9de5-efb3afa1692b"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.152986 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.157916 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-xkfvj" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.223556 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce871a4d-4584-4ed4-9de5-efb3afa1692b-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.232404 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:17 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:17 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:17 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.232490 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.381441 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-qrscw" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.640862 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.640877 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ce871a4d-4584-4ed4-9de5-efb3afa1692b","Type":"ContainerDied","Data":"22e9692dd000842ee51bb0c2b72cb3d0b99ef261cce78e071313868749801e2a"} Nov 22 04:49:17 crc kubenswrapper[4948]: I1122 04:49:17.641243 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22e9692dd000842ee51bb0c2b72cb3d0b99ef261cce78e071313868749801e2a" Nov 22 04:49:18 crc kubenswrapper[4948]: I1122 04:49:18.231911 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:18 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:18 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:18 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:18 crc kubenswrapper[4948]: I1122 04:49:18.232001 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:19 crc kubenswrapper[4948]: I1122 04:49:19.231140 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:19 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:19 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:19 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:19 crc kubenswrapper[4948]: I1122 04:49:19.231215 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:20 crc kubenswrapper[4948]: I1122 04:49:20.280032 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:20 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:20 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:20 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:20 crc kubenswrapper[4948]: I1122 04:49:20.281215 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:21 crc kubenswrapper[4948]: I1122 04:49:21.230900 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:21 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:21 crc kubenswrapper[4948]: 
[+]process-running ok Nov 22 04:49:21 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:21 crc kubenswrapper[4948]: I1122 04:49:21.231211 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:21 crc kubenswrapper[4948]: I1122 04:49:21.679691 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-lpvjc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 22 04:49:21 crc kubenswrapper[4948]: I1122 04:49:21.679717 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-lpvjc container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 22 04:49:21 crc kubenswrapper[4948]: I1122 04:49:21.679751 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-lpvjc" podUID="c74bf710-a8fa-4d55-9f68-771f56c145f7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 22 04:49:21 crc kubenswrapper[4948]: I1122 04:49:21.679766 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-lpvjc" podUID="c74bf710-a8fa-4d55-9f68-771f56c145f7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 22 04:49:22 crc kubenswrapper[4948]: I1122 04:49:22.046858 4948 patch_prober.go:28] interesting pod/console-f9d7485db-kmbfh container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Nov 22 04:49:22 crc kubenswrapper[4948]: I1122 04:49:22.046907 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-kmbfh" podUID="3ce40099-332b-49b0-8eee-914df6a6a572" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" Nov 22 04:49:22 crc kubenswrapper[4948]: I1122 04:49:22.200620 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:49:22 crc kubenswrapper[4948]: I1122 04:49:22.206851 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a35ebfd-12d4-4129-9c61-9d5880130fa0-metrics-certs\") pod \"network-metrics-daemon-btkdx\" (UID: \"9a35ebfd-12d4-4129-9c61-9d5880130fa0\") " pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:49:22 crc kubenswrapper[4948]: I1122 04:49:22.231889 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: 
reason withheld Nov 22 04:49:22 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:22 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:22 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:22 crc kubenswrapper[4948]: I1122 04:49:22.231943 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:22 crc kubenswrapper[4948]: I1122 04:49:22.279129 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-btkdx" Nov 22 04:49:23 crc kubenswrapper[4948]: I1122 04:49:23.230137 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:23 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Nov 22 04:49:23 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:23 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:23 crc kubenswrapper[4948]: I1122 04:49:23.231442 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:24 crc kubenswrapper[4948]: I1122 04:49:24.233256 4948 patch_prober.go:28] interesting pod/router-default-5444994796-h8wxv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 04:49:24 crc kubenswrapper[4948]: [+]has-synced ok Nov 22 04:49:24 crc kubenswrapper[4948]: [+]process-running ok Nov 22 04:49:24 crc kubenswrapper[4948]: healthz check failed Nov 22 04:49:24 crc kubenswrapper[4948]: I1122 04:49:24.233394 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-h8wxv" podUID="ae58b080-edff-4582-839f-fc67d5b0b981" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 04:49:25 crc kubenswrapper[4948]: I1122 04:49:25.232570 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:25 crc kubenswrapper[4948]: I1122 04:49:25.235966 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-h8wxv" Nov 22 04:49:29 crc kubenswrapper[4948]: I1122 04:49:29.789831 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 04:49:29 crc kubenswrapper[4948]: I1122 04:49:29.790171 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 04:49:31 crc kubenswrapper[4948]: I1122 
04:49:31.375274 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:49:31 crc kubenswrapper[4948]: I1122 04:49:31.684720 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-lpvjc" Nov 22 04:49:32 crc kubenswrapper[4948]: I1122 04:49:32.198488 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:49:32 crc kubenswrapper[4948]: I1122 04:49:32.203311 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-kmbfh" Nov 22 04:49:39 crc kubenswrapper[4948]: E1122 04:49:39.907197 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 22 04:49:39 crc kubenswrapper[4948]: E1122 04:49:39.908196 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6cpnh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-cs2cm_openshift-marketplace(268b6b3d-a26e-4d86-9ef9-dba7dc5478cd): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 04:49:39 crc kubenswrapper[4948]: E1122 04:49:39.910033 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-cs2cm" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" Nov 22 04:49:42 crc kubenswrapper[4948]: I1122 04:49:42.298393 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h6gh5" Nov 22 04:49:44 crc kubenswrapper[4948]: E1122 04:49:44.020133 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-cs2cm" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" Nov 22 04:49:48 crc kubenswrapper[4948]: I1122 04:49:48.006820 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 04:49:49 crc kubenswrapper[4948]: E1122 04:49:49.412959 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 22 04:49:49 crc kubenswrapper[4948]: E1122 04:49:49.413147 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-phdkz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-x57l2_openshift-marketplace(4ba86786-2cde-4556-949f-f0c07fb471ec): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 04:49:49 crc kubenswrapper[4948]: E1122 04:49:49.414366 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-x57l2" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" Nov 22 04:49:50 crc kubenswrapper[4948]: E1122 04:49:50.937487 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-x57l2" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" Nov 22 04:49:51 crc kubenswrapper[4948]: E1122 04:49:51.063382 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 22 04:49:51 crc kubenswrapper[4948]: E1122 04:49:51.063605 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v4zfc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-5lglc_openshift-marketplace(26a59e1e-ef32-4b87-86ae-eb86aadafcad): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 04:49:51 crc kubenswrapper[4948]: E1122 04:49:51.064835 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-5lglc" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" Nov 22 04:49:57 crc kubenswrapper[4948]: E1122 04:49:57.191761 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-5lglc" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" Nov 22 04:49:57 crc kubenswrapper[4948]: I1122 04:49:57.627525 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-btkdx"] Nov 22 04:49:57 crc kubenswrapper[4948]: I1122 04:49:57.891014 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-btkdx" 
event={"ID":"9a35ebfd-12d4-4129-9c61-9d5880130fa0","Type":"ContainerStarted","Data":"6f9feb8988e54d9169bbe118cf146f3437c69ba1b333082b1c8edf47a0333043"} Nov 22 04:49:59 crc kubenswrapper[4948]: E1122 04:49:59.198362 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 22 04:49:59 crc kubenswrapper[4948]: E1122 04:49:59.198883 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h2d94,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-lrv8b_openshift-marketplace(04278e85-8ed2-4820-adbf-ee745fc13337): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 04:49:59 crc kubenswrapper[4948]: E1122 04:49:59.200153 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-lrv8b" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" Nov 22 04:49:59 crc kubenswrapper[4948]: I1122 04:49:59.790120 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 04:49:59 crc kubenswrapper[4948]: I1122 04:49:59.790191 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 04:49:59 crc 
kubenswrapper[4948]: E1122 04:49:59.901078 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-lrv8b" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" Nov 22 04:50:00 crc kubenswrapper[4948]: I1122 04:50:00.908372 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-btkdx" event={"ID":"9a35ebfd-12d4-4129-9c61-9d5880130fa0","Type":"ContainerStarted","Data":"09e7cb854658797b4c5ebf881726477e6ddb6a6335c9f13f68c9e24e6586a6b7"} Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.299597 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.300262 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m22cb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-9ltfm_openshift-marketplace(94c90684-8f9b-4922-ad0b-1fa3b77d0767): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.301848 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-9ltfm" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.303000 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.303145 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hc94m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-fw6ph_openshift-marketplace(f43c81a6-850d-442c-92b2-1c45ab8e32cf): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.304504 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-fw6ph" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.309230 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.309508 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8jf48,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-l8rpk_openshift-marketplace(74a8814b-851c-4d5f-833b-ca0c87b76f48): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.310724 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-l8rpk" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.920129 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-9ltfm" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.920170 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-fw6ph" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" Nov 22 04:50:02 crc kubenswrapper[4948]: E1122 04:50:02.920323 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-l8rpk" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" Nov 22 04:50:03 crc kubenswrapper[4948]: E1122 04:50:03.125846 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 22 04:50:03 crc kubenswrapper[4948]: E1122 04:50:03.126250 4948 kuberuntime_manager.go:1274] "Unhandled Error" 
err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rccdm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-fmtjw_openshift-marketplace(be825ff5-e561-447a-b1d2-1676b8577454): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 04:50:03 crc kubenswrapper[4948]: E1122 04:50:03.128128 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-fmtjw" podUID="be825ff5-e561-447a-b1d2-1676b8577454" Nov 22 04:50:03 crc kubenswrapper[4948]: I1122 04:50:03.926120 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-btkdx" event={"ID":"9a35ebfd-12d4-4129-9c61-9d5880130fa0","Type":"ContainerStarted","Data":"8f1f3dea13b06fac07cd9c611e87df065ce7e4bec9d13fe11439392691188d05"} Nov 22 04:50:03 crc kubenswrapper[4948]: E1122 04:50:03.928080 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-fmtjw" podUID="be825ff5-e561-447a-b1d2-1676b8577454" Nov 22 04:50:03 crc kubenswrapper[4948]: I1122 04:50:03.947733 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-btkdx" podStartSLOduration=184.947704524 podStartE2EDuration="3m4.947704524s" podCreationTimestamp="2025-11-22 04:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:50:03.946213272 +0000 UTC m=+206.632223798" watchObservedRunningTime="2025-11-22 04:50:03.947704524 +0000 UTC m=+206.633715060" Nov 22 04:50:13 crc kubenswrapper[4948]: 
I1122 04:50:13.987036 4948 generic.go:334] "Generic (PLEG): container finished" podID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerID="6370cd602d66fdbb309c5987a2616d607685d7d6898068031713068fdaa10f67" exitCode=0 Nov 22 04:50:13 crc kubenswrapper[4948]: I1122 04:50:13.987108 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x57l2" event={"ID":"4ba86786-2cde-4556-949f-f0c07fb471ec","Type":"ContainerDied","Data":"6370cd602d66fdbb309c5987a2616d607685d7d6898068031713068fdaa10f67"} Nov 22 04:50:13 crc kubenswrapper[4948]: I1122 04:50:13.989836 4948 generic.go:334] "Generic (PLEG): container finished" podID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerID="4f69f08b7e8db72ef6b985a649e240b7f52b19a1ba832d4e24338de7c931822b" exitCode=0 Nov 22 04:50:13 crc kubenswrapper[4948]: I1122 04:50:13.989884 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cs2cm" event={"ID":"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd","Type":"ContainerDied","Data":"4f69f08b7e8db72ef6b985a649e240b7f52b19a1ba832d4e24338de7c931822b"} Nov 22 04:50:14 crc kubenswrapper[4948]: I1122 04:50:14.998257 4948 generic.go:334] "Generic (PLEG): container finished" podID="04278e85-8ed2-4820-adbf-ee745fc13337" containerID="6ffd9e88352d3186cf2464995e5bb4f758f84a26eb94b71be6cbcd2491ef4249" exitCode=0 Nov 22 04:50:15 crc kubenswrapper[4948]: I1122 04:50:14.998335 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrv8b" event={"ID":"04278e85-8ed2-4820-adbf-ee745fc13337","Type":"ContainerDied","Data":"6ffd9e88352d3186cf2464995e5bb4f758f84a26eb94b71be6cbcd2491ef4249"} Nov 22 04:50:15 crc kubenswrapper[4948]: I1122 04:50:15.001451 4948 generic.go:334] "Generic (PLEG): container finished" podID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerID="aa1be7aee560dc39f2ef522869b54e02fab53dad0aa2b6db9904d05289c43108" exitCode=0 Nov 22 04:50:15 crc kubenswrapper[4948]: I1122 04:50:15.001492 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lglc" event={"ID":"26a59e1e-ef32-4b87-86ae-eb86aadafcad","Type":"ContainerDied","Data":"aa1be7aee560dc39f2ef522869b54e02fab53dad0aa2b6db9904d05289c43108"} Nov 22 04:50:16 crc kubenswrapper[4948]: I1122 04:50:16.021943 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cs2cm" event={"ID":"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd","Type":"ContainerStarted","Data":"d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de"} Nov 22 04:50:16 crc kubenswrapper[4948]: I1122 04:50:16.024862 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x57l2" event={"ID":"4ba86786-2cde-4556-949f-f0c07fb471ec","Type":"ContainerStarted","Data":"280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e"} Nov 22 04:50:16 crc kubenswrapper[4948]: I1122 04:50:16.061092 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cs2cm" podStartSLOduration=3.651763409 podStartE2EDuration="1m6.061060111s" podCreationTimestamp="2025-11-22 04:49:10 +0000 UTC" firstStartedPulling="2025-11-22 04:49:12.519762555 +0000 UTC m=+155.205773071" lastFinishedPulling="2025-11-22 04:50:14.929059257 +0000 UTC m=+217.615069773" observedRunningTime="2025-11-22 04:50:16.042364605 +0000 UTC m=+218.728375121" watchObservedRunningTime="2025-11-22 04:50:16.061060111 +0000 UTC m=+218.747070627" Nov 22 
04:50:17 crc kubenswrapper[4948]: I1122 04:50:17.031242 4948 generic.go:334] "Generic (PLEG): container finished" podID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerID="591c4dee722a7bdc43563d6b816f967fb2b86f39afe44cca95a9139fa88966d6" exitCode=0
Nov 22 04:50:17 crc kubenswrapper[4948]: I1122 04:50:17.031269 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ltfm" event={"ID":"94c90684-8f9b-4922-ad0b-1fa3b77d0767","Type":"ContainerDied","Data":"591c4dee722a7bdc43563d6b816f967fb2b86f39afe44cca95a9139fa88966d6"}
Nov 22 04:50:17 crc kubenswrapper[4948]: I1122 04:50:17.033170 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrv8b" event={"ID":"04278e85-8ed2-4820-adbf-ee745fc13337","Type":"ContainerStarted","Data":"89277f07ab60b383630325a1fc44631f96a96c60b0b421ae3cd3d86754b71ab7"}
Nov 22 04:50:17 crc kubenswrapper[4948]: I1122 04:50:17.035565 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lglc" event={"ID":"26a59e1e-ef32-4b87-86ae-eb86aadafcad","Type":"ContainerStarted","Data":"ad0c40a6841e27524254edc91c06f25cbfa92b2f3c85c7032dfb362efc400802"}
Nov 22 04:50:17 crc kubenswrapper[4948]: I1122 04:50:17.057934 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-x57l2" podStartSLOduration=4.468773319 podStartE2EDuration="1m5.057910368s" podCreationTimestamp="2025-11-22 04:49:12 +0000 UTC" firstStartedPulling="2025-11-22 04:49:14.591667893 +0000 UTC m=+157.277678409" lastFinishedPulling="2025-11-22 04:50:15.180804942 +0000 UTC m=+217.866815458" observedRunningTime="2025-11-22 04:50:16.063060638 +0000 UTC m=+218.749071154" watchObservedRunningTime="2025-11-22 04:50:17.057910368 +0000 UTC m=+219.743920884"
Nov 22 04:50:17 crc kubenswrapper[4948]: I1122 04:50:17.081330 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lrv8b" podStartSLOduration=3.5883265030000002 podStartE2EDuration="1m7.081311898s" podCreationTimestamp="2025-11-22 04:49:10 +0000 UTC" firstStartedPulling="2025-11-22 04:49:12.522309337 +0000 UTC m=+155.208319853" lastFinishedPulling="2025-11-22 04:50:16.015294732 +0000 UTC m=+218.701305248" observedRunningTime="2025-11-22 04:50:17.079516837 +0000 UTC m=+219.765527353" watchObservedRunningTime="2025-11-22 04:50:17.081311898 +0000 UTC m=+219.767322414"
Nov 22 04:50:17 crc kubenswrapper[4948]: I1122 04:50:17.775277 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5lglc" podStartSLOduration=4.494760165 podStartE2EDuration="1m4.775254997s" podCreationTimestamp="2025-11-22 04:49:13 +0000 UTC" firstStartedPulling="2025-11-22 04:49:15.607439371 +0000 UTC m=+158.293449887" lastFinishedPulling="2025-11-22 04:50:15.887934203 +0000 UTC m=+218.573944719" observedRunningTime="2025-11-22 04:50:17.098002788 +0000 UTC m=+219.784013304" watchObservedRunningTime="2025-11-22 04:50:17.775254997 +0000 UTC m=+220.461265523"
Nov 22 04:50:18 crc kubenswrapper[4948]: I1122 04:50:18.041558 4948 generic.go:334] "Generic (PLEG): container finished" podID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerID="c39acc48fc145dad4964b9919ccb9b6b623cc3a27af3a1a54c4709ea8102422b" exitCode=0
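
Note: unlike the network-metrics-daemon record earlier, these catalog pods have real pull timestamps, and podStartSLOduration is the E2E duration minus the image-pull window (lastFinishedPulling - firstStartedPulling), so time spent pulling from the registry is not charged against the SLO. For redhat-marketplace-x57l2: 1m5.057910368s - (04:50:15.180804942 - 04:49:14.591667893) = 4.468773319s. The same check in Go, with the values copied from the record:

    package main

    import (
        "fmt"
        "time"
    )

    // podStartSLOduration for redhat-marketplace-x57l2 = E2E start
    // duration minus the image-pull window; all values from the record.
    func main() {
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        e2e, _ := time.ParseDuration("1m5.057910368s")
        firstPull, _ := time.Parse(layout, "2025-11-22 04:49:14.591667893 +0000 UTC")
        lastPull, _ := time.Parse(layout, "2025-11-22 04:50:15.180804942 +0000 UTC")
        fmt.Println(e2e - lastPull.Sub(firstPull)) // 4.468773319s
    }

Nov 22 04:50:18 crc kubenswrapper[4948]: I1122 04:50:18.041596 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8rpk" 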
event={"ID":"74a8814b-851c-4d5f-833b-ca0c87b76f48","Type":"ContainerDied","Data":"c39acc48fc145dad4964b9919ccb9b6b623cc3a27af3a1a54c4709ea8102422b"} Nov 22 04:50:19 crc kubenswrapper[4948]: I1122 04:50:19.048557 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ltfm" event={"ID":"94c90684-8f9b-4922-ad0b-1fa3b77d0767","Type":"ContainerStarted","Data":"26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea"} Nov 22 04:50:19 crc kubenswrapper[4948]: I1122 04:50:19.064817 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9ltfm" podStartSLOduration=2.375088458 podStartE2EDuration="1m5.064792463s" podCreationTimestamp="2025-11-22 04:49:14 +0000 UTC" firstStartedPulling="2025-11-22 04:49:15.60383521 +0000 UTC m=+158.289845726" lastFinishedPulling="2025-11-22 04:50:18.293539205 +0000 UTC m=+220.979549731" observedRunningTime="2025-11-22 04:50:19.063891197 +0000 UTC m=+221.749901713" watchObservedRunningTime="2025-11-22 04:50:19.064792463 +0000 UTC m=+221.750803019" Nov 22 04:50:20 crc kubenswrapper[4948]: I1122 04:50:20.887189 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:50:20 crc kubenswrapper[4948]: I1122 04:50:20.887550 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:50:21 crc kubenswrapper[4948]: I1122 04:50:21.161453 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:50:21 crc kubenswrapper[4948]: I1122 04:50:21.161702 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:50:22 crc kubenswrapper[4948]: I1122 04:50:22.379209 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:50:22 crc kubenswrapper[4948]: I1122 04:50:22.382700 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:50:22 crc kubenswrapper[4948]: I1122 04:50:22.453147 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lrv8b" Nov 22 04:50:23 crc kubenswrapper[4948]: I1122 04:50:23.131798 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:50:23 crc kubenswrapper[4948]: I1122 04:50:23.132089 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:50:23 crc kubenswrapper[4948]: I1122 04:50:23.148194 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:50:23 crc kubenswrapper[4948]: I1122 04:50:23.222521 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:50:24 crc kubenswrapper[4948]: I1122 04:50:24.137626 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:50:24 crc kubenswrapper[4948]: I1122 04:50:24.185318 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:50:24 crc kubenswrapper[4948]: I1122 04:50:24.185379 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:50:24 crc kubenswrapper[4948]: I1122 04:50:24.236583 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:50:24 crc kubenswrapper[4948]: I1122 04:50:24.391339 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cs2cm"] Nov 22 04:50:24 crc kubenswrapper[4948]: I1122 04:50:24.499085 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:50:24 crc kubenswrapper[4948]: I1122 04:50:24.499157 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:50:25 crc kubenswrapper[4948]: I1122 04:50:25.086198 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cs2cm" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="registry-server" containerID="cri-o://d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de" gracePeriod=2 Nov 22 04:50:25 crc kubenswrapper[4948]: I1122 04:50:25.154569 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5lglc" Nov 22 04:50:25 crc kubenswrapper[4948]: I1122 04:50:25.555716 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9ltfm" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="registry-server" probeResult="failure" output=< Nov 22 04:50:25 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Nov 22 04:50:25 crc kubenswrapper[4948]: > Nov 22 04:50:26 crc kubenswrapper[4948]: I1122 04:50:26.790292 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x57l2"] Nov 22 04:50:27 crc kubenswrapper[4948]: I1122 04:50:27.098309 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-x57l2" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="registry-server" containerID="cri-o://280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e" gracePeriod=2 Nov 22 04:50:29 crc kubenswrapper[4948]: I1122 04:50:29.790099 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 04:50:29 crc kubenswrapper[4948]: I1122 04:50:29.790165 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 04:50:29 crc kubenswrapper[4948]: I1122 04:50:29.790219 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:50:29 crc kubenswrapper[4948]: I1122 04:50:29.790840 4948 kuberuntime_manager.go:1027] "Message 
for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58"} pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 04:50:29 crc kubenswrapper[4948]: I1122 04:50:29.790953 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" containerID="cri-o://4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58" gracePeriod=600 Nov 22 04:50:31 crc kubenswrapper[4948]: E1122 04:50:31.161931 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de is running failed: container process not found" containerID="d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:31 crc kubenswrapper[4948]: E1122 04:50:31.162546 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de is running failed: container process not found" containerID="d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:31 crc kubenswrapper[4948]: E1122 04:50:31.162848 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de is running failed: container process not found" containerID="d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:31 crc kubenswrapper[4948]: E1122 04:50:31.162940 4948 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-cs2cm" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="registry-server" Nov 22 04:50:32 crc kubenswrapper[4948]: I1122 04:50:32.955810 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cs2cm_268b6b3d-a26e-4d86-9ef9-dba7dc5478cd/registry-server/0.log" Nov 22 04:50:32 crc kubenswrapper[4948]: I1122 04:50:32.957559 4948 generic.go:334] "Generic (PLEG): container finished" podID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerID="d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de" exitCode=137 Nov 22 04:50:32 crc kubenswrapper[4948]: I1122 04:50:32.957619 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cs2cm" event={"ID":"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd","Type":"ContainerDied","Data":"d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de"} Nov 22 04:50:33 crc kubenswrapper[4948]: E1122 04:50:33.131930 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if 
PID of 280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e is running failed: container process not found" containerID="280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:33 crc kubenswrapper[4948]: E1122 04:50:33.132600 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e is running failed: container process not found" containerID="280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:33 crc kubenswrapper[4948]: E1122 04:50:33.133265 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e is running failed: container process not found" containerID="280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:33 crc kubenswrapper[4948]: E1122 04:50:33.133386 4948 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-x57l2" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="registry-server" Nov 22 04:50:33 crc kubenswrapper[4948]: I1122 04:50:33.967342 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-x57l2_4ba86786-2cde-4556-949f-f0c07fb471ec/registry-server/0.log" Nov 22 04:50:33 crc kubenswrapper[4948]: I1122 04:50:33.968711 4948 generic.go:334] "Generic (PLEG): container finished" podID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerID="280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e" exitCode=137 Nov 22 04:50:33 crc kubenswrapper[4948]: I1122 04:50:33.968831 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x57l2" event={"ID":"4ba86786-2cde-4556-949f-f0c07fb471ec","Type":"ContainerDied","Data":"280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e"} Nov 22 04:50:33 crc kubenswrapper[4948]: I1122 04:50:33.971900 4948 generic.go:334] "Generic (PLEG): container finished" podID="126f010b-a640-4133-b63f-d2976da99215" containerID="4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58" exitCode=0 Nov 22 04:50:33 crc kubenswrapper[4948]: I1122 04:50:33.971980 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerDied","Data":"4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58"} Nov 22 04:50:34 crc kubenswrapper[4948]: I1122 04:50:34.562423 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:50:34 crc kubenswrapper[4948]: I1122 04:50:34.629084 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9ltfm" Nov 22 04:50:35 crc kubenswrapper[4948]: I1122 04:50:35.994660 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9ltfm"] Nov 
22 04:50:35 crc kubenswrapper[4948]: I1122 04:50:35.995053 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9ltfm" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="registry-server" containerID="cri-o://26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea" gracePeriod=2
Nov 22 04:50:41 crc kubenswrapper[4948]: E1122 04:50:41.162095 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de is running failed: container process not found" containerID="d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de" cmd=["grpc_health_probe","-addr=:50051"]
Nov 22 04:50:41 crc kubenswrapper[4948]: E1122 04:50:41.163811 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de is running failed: container process not found" containerID="d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de" cmd=["grpc_health_probe","-addr=:50051"]
Nov 22 04:50:41 crc kubenswrapper[4948]: E1122 04:50:41.164523 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de is running failed: container process not found" containerID="d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de" cmd=["grpc_health_probe","-addr=:50051"]
Nov 22 04:50:41 crc kubenswrapper[4948]: E1122 04:50:41.164627 4948 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-cs2cm" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="registry-server"
Nov 22 04:50:43 crc kubenswrapper[4948]: I1122 04:50:43.041358 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9ltfm_94c90684-8f9b-4922-ad0b-1fa3b77d0767/registry-server/0.log"
Nov 22 04:50:43 crc kubenswrapper[4948]: I1122 04:50:43.042778 4948 generic.go:334] "Generic (PLEG): container finished" podID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerID="26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea" exitCode=137
Nov 22 04:50:43 crc kubenswrapper[4948]: I1122 04:50:43.042849 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ltfm" event={"ID":"94c90684-8f9b-4922-ad0b-1fa3b77d0767","Type":"ContainerDied","Data":"26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea"}
Nov 22 04:50:43 crc kubenswrapper[4948]: E1122 04:50:43.131667 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e is running failed: container process not found" containerID="280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e" cmd=["grpc_health_probe","-addr=:50051"]
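
Note: exitCode=137 above is the standard 128+N convention for death by signal: the registry-server did not stop within its 2s grace period, so the runtime escalated to SIGKILL and the container exited with 128 + 9 = 137. The ExecSync "NotFound" errors around it are readiness probes racing the teardown of a container whose process is already gone. The convention, spelled out:

    package main

    import (
        "fmt"
        "syscall"
    )

    // exitCode=137 decodes as 128 + signal number: 137 - 128 = 9,
    // i.e. SIGKILL, sent after the termination grace period expired.
    func main() {
        exitCode := 137
        sig := syscall.Signal(exitCode - 128)
        fmt.Printf("exit code %d => terminated by signal %d (%v)\n", exitCode, int(sig), sig)
    }

Nov 22 04:50:43 crc kubenswrapper[4948]: E1122 04:50:43.132594 4948 log.go:32] "ExecSync cmd from runtime service failed" 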
err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e is running failed: container process not found" containerID="280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:43 crc kubenswrapper[4948]: E1122 04:50:43.133117 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e is running failed: container process not found" containerID="280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:43 crc kubenswrapper[4948]: E1122 04:50:43.133185 4948 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-x57l2" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="registry-server" Nov 22 04:50:44 crc kubenswrapper[4948]: E1122 04:50:44.499932 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea is running failed: container process not found" containerID="26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:44 crc kubenswrapper[4948]: E1122 04:50:44.500864 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea is running failed: container process not found" containerID="26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:44 crc kubenswrapper[4948]: E1122 04:50:44.501499 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea is running failed: container process not found" containerID="26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 04:50:44 crc kubenswrapper[4948]: E1122 04:50:44.501552 4948 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-9ltfm" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="registry-server" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.656037 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cs2cm_268b6b3d-a26e-4d86-9ef9-dba7dc5478cd/registry-server/0.log" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.658227 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cs2cm" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.663764 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-x57l2_4ba86786-2cde-4556-949f-f0c07fb471ec/registry-server/0.log" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.664766 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x57l2" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.789284 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-utilities\") pod \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.789346 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-catalog-content\") pod \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.789378 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-utilities\") pod \"4ba86786-2cde-4556-949f-f0c07fb471ec\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.789424 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phdkz\" (UniqueName: \"kubernetes.io/projected/4ba86786-2cde-4556-949f-f0c07fb471ec-kube-api-access-phdkz\") pod \"4ba86786-2cde-4556-949f-f0c07fb471ec\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.789483 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cpnh\" (UniqueName: \"kubernetes.io/projected/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-kube-api-access-6cpnh\") pod \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\" (UID: \"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd\") " Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.789507 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-catalog-content\") pod \"4ba86786-2cde-4556-949f-f0c07fb471ec\" (UID: \"4ba86786-2cde-4556-949f-f0c07fb471ec\") " Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.790562 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-utilities" (OuterVolumeSpecName: "utilities") pod "268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" (UID: "268b6b3d-a26e-4d86-9ef9-dba7dc5478cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.790629 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-utilities" (OuterVolumeSpecName: "utilities") pod "4ba86786-2cde-4556-949f-f0c07fb471ec" (UID: "4ba86786-2cde-4556-949f-f0c07fb471ec"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.796349 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-kube-api-access-6cpnh" (OuterVolumeSpecName: "kube-api-access-6cpnh") pod "268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" (UID: "268b6b3d-a26e-4d86-9ef9-dba7dc5478cd"). InnerVolumeSpecName "kube-api-access-6cpnh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.799331 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ba86786-2cde-4556-949f-f0c07fb471ec-kube-api-access-phdkz" (OuterVolumeSpecName: "kube-api-access-phdkz") pod "4ba86786-2cde-4556-949f-f0c07fb471ec" (UID: "4ba86786-2cde-4556-949f-f0c07fb471ec"). InnerVolumeSpecName "kube-api-access-phdkz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.891384 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cpnh\" (UniqueName: \"kubernetes.io/projected/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-kube-api-access-6cpnh\") on node \"crc\" DevicePath \"\"" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.891488 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.891513 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 04:50:44 crc kubenswrapper[4948]: I1122 04:50:44.891556 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phdkz\" (UniqueName: \"kubernetes.io/projected/4ba86786-2cde-4556-949f-f0c07fb471ec-kube-api-access-phdkz\") on node \"crc\" DevicePath \"\"" Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.058624 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cs2cm_268b6b3d-a26e-4d86-9ef9-dba7dc5478cd/registry-server/0.log" Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.060024 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cs2cm" event={"ID":"268b6b3d-a26e-4d86-9ef9-dba7dc5478cd","Type":"ContainerDied","Data":"4d90b56d0cb5a0795771cead4a0bc76e47d0a38d5c7696b089754ade30952424"} Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.060066 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cs2cm"
Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.060120 4948 scope.go:117] "RemoveContainer" containerID="d50cb6369ef1dd55f6d70481562e5582c8443098bcdc596d3fd87df6c3aa95de"
Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.062686 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-x57l2_4ba86786-2cde-4556-949f-f0c07fb471ec/registry-server/0.log"
Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.064728 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x57l2" event={"ID":"4ba86786-2cde-4556-949f-f0c07fb471ec","Type":"ContainerDied","Data":"ea017a8eeba08432ca72872a2c69b9eec72e1e956f24e1fbee777f169c2d431c"}
Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.064842 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x57l2"
Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.315764 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ba86786-2cde-4556-949f-f0c07fb471ec" (UID: "4ba86786-2cde-4556-949f-f0c07fb471ec"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.401593 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba86786-2cde-4556-949f-f0c07fb471ec-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.416949 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x57l2"]
Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.422281 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-x57l2"]
Nov 22 04:50:45 crc kubenswrapper[4948]: I1122 04:50:45.768738 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" path="/var/lib/kubelet/pods/4ba86786-2cde-4556-949f-f0c07fb471ec/volumes"
Nov 22 04:50:46 crc kubenswrapper[4948]: I1122 04:50:46.330535 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" (UID: "268b6b3d-a26e-4d86-9ef9-dba7dc5478cd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:50:46 crc kubenswrapper[4948]: I1122 04:50:46.415299 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 04:50:46 crc kubenswrapper[4948]: I1122 04:50:46.600302 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cs2cm"]
Nov 22 04:50:46 crc kubenswrapper[4948]: I1122 04:50:46.607072 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cs2cm"]
Nov 22 04:50:47 crc kubenswrapper[4948]: I1122 04:50:47.769779 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" path="/var/lib/kubelet/pods/268b6b3d-a26e-4d86-9ef9-dba7dc5478cd/volumes"
Nov 22 04:50:50 crc kubenswrapper[4948]: I1122 04:50:50.492177 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9ltfm_94c90684-8f9b-4922-ad0b-1fa3b77d0767/registry-server/0.log"
Nov 22 04:50:50 crc kubenswrapper[4948]: I1122 04:50:50.493861 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9ltfm"
Nov 22 04:50:50 crc kubenswrapper[4948]: I1122 04:50:50.678589 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-catalog-content\") pod \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") "
Nov 22 04:50:50 crc kubenswrapper[4948]: I1122 04:50:50.678736 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m22cb\" (UniqueName: \"kubernetes.io/projected/94c90684-8f9b-4922-ad0b-1fa3b77d0767-kube-api-access-m22cb\") pod \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") "
Nov 22 04:50:50 crc kubenswrapper[4948]: I1122 04:50:50.678918 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-utilities\") pod \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\" (UID: \"94c90684-8f9b-4922-ad0b-1fa3b77d0767\") "
Nov 22 04:50:50 crc kubenswrapper[4948]: I1122 04:50:50.679899 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-utilities" (OuterVolumeSpecName: "utilities") pod "94c90684-8f9b-4922-ad0b-1fa3b77d0767" (UID: "94c90684-8f9b-4922-ad0b-1fa3b77d0767"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:50:50 crc kubenswrapper[4948]: I1122 04:50:50.685795 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94c90684-8f9b-4922-ad0b-1fa3b77d0767-kube-api-access-m22cb" (OuterVolumeSpecName: "kube-api-access-m22cb") pod "94c90684-8f9b-4922-ad0b-1fa3b77d0767" (UID: "94c90684-8f9b-4922-ad0b-1fa3b77d0767"). InnerVolumeSpecName "kube-api-access-m22cb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:50:50 crc kubenswrapper[4948]: I1122 04:50:50.780012 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 04:50:50 crc kubenswrapper[4948]: I1122 04:50:50.780077 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m22cb\" (UniqueName: \"kubernetes.io/projected/94c90684-8f9b-4922-ad0b-1fa3b77d0767-kube-api-access-m22cb\") on node \"crc\" DevicePath \"\""
Nov 22 04:50:51 crc kubenswrapper[4948]: I1122 04:50:51.111442 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9ltfm_94c90684-8f9b-4922-ad0b-1fa3b77d0767/registry-server/0.log"
Nov 22 04:50:51 crc kubenswrapper[4948]: I1122 04:50:51.112903 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9ltfm" event={"ID":"94c90684-8f9b-4922-ad0b-1fa3b77d0767","Type":"ContainerDied","Data":"e3fe38b2a3338591303a53671378168fd0ad0bd688ae0fcbd2684c5e54d4abfc"}
Nov 22 04:50:51 crc kubenswrapper[4948]: I1122 04:50:51.112996 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9ltfm"
Nov 22 04:50:51 crc kubenswrapper[4948]: I1122 04:50:51.855938 4948 scope.go:117] "RemoveContainer" containerID="4f69f08b7e8db72ef6b985a649e240b7f52b19a1ba832d4e24338de7c931822b"
Nov 22 04:51:01 crc kubenswrapper[4948]: I1122 04:51:01.226749 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94c90684-8f9b-4922-ad0b-1fa3b77d0767" (UID: "94c90684-8f9b-4922-ad0b-1fa3b77d0767"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:51:01 crc kubenswrapper[4948]: I1122 04:51:01.275439 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94c90684-8f9b-4922-ad0b-1fa3b77d0767-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 04:51:01 crc kubenswrapper[4948]: I1122 04:51:01.339616 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9ltfm"]
Nov 22 04:51:01 crc kubenswrapper[4948]: I1122 04:51:01.343901 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9ltfm"]
Nov 22 04:51:01 crc kubenswrapper[4948]: I1122 04:51:01.774104 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" path="/var/lib/kubelet/pods/94c90684-8f9b-4922-ad0b-1fa3b77d0767/volumes"
Nov 22 04:51:03 crc kubenswrapper[4948]: I1122 04:51:03.162111 4948 scope.go:117] "RemoveContainer" containerID="095303b08afab09853115c9997a4dabe121c3d16eac6fbefd393b8426870668f"
Nov 22 04:51:11 crc kubenswrapper[4948]: I1122 04:51:11.374351 4948 scope.go:117] "RemoveContainer" containerID="280e2a81879ae490c216d89783e89d10f5813d4bca5465eef49ad06a5a898b8e"
Nov 22 04:51:12 crc kubenswrapper[4948]: I1122 04:51:12.204806 4948 scope.go:117] "RemoveContainer" containerID="6370cd602d66fdbb309c5987a2616d607685d7d6898068031713068fdaa10f67"
Nov 22 04:51:12 crc kubenswrapper[4948]: I1122 04:51:12.239326 4948 scope.go:117] "RemoveContainer" containerID="47d49a4032a61f2b2e1f97879ebb0503e9cc83e2abd5ca227e9f782c22a7e8ce"
Nov 22 04:51:12 crc kubenswrapper[4948]: I1122 04:51:12.296148 4948 scope.go:117] "RemoveContainer" containerID="26d39fd2e56faa888647e93d0e72b1401def580e0195a4bd5f68d8a8943e30ea"
Nov 22 04:51:12 crc kubenswrapper[4948]: I1122 04:51:12.363112 4948 scope.go:117] "RemoveContainer" containerID="591c4dee722a7bdc43563d6b816f967fb2b86f39afe44cca95a9139fa88966d6"
Nov 22 04:51:12 crc kubenswrapper[4948]: I1122 04:51:12.422871 4948 scope.go:117] "RemoveContainer" containerID="e93cd15c4fecf7c6fafc565be33c849dd42d75a317f1f28c523f87fa8bb3ffd6"
Nov 22 04:51:13 crc kubenswrapper[4948]: I1122 04:51:13.289305 4948 generic.go:334] "Generic (PLEG): container finished" podID="be825ff5-e561-447a-b1d2-1676b8577454" containerID="9796702b61c9fd209f765cc9b61e0348de70fec2ab977433723813fadc09377b" exitCode=0
Nov 22 04:51:13 crc kubenswrapper[4948]: I1122 04:51:13.289432 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmtjw" event={"ID":"be825ff5-e561-447a-b1d2-1676b8577454","Type":"ContainerDied","Data":"9796702b61c9fd209f765cc9b61e0348de70fec2ab977433723813fadc09377b"}
Nov 22 04:51:13 crc kubenswrapper[4948]: I1122 04:51:13.305534 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8rpk" event={"ID":"74a8814b-851c-4d5f-833b-ca0c87b76f48","Type":"ContainerStarted","Data":"617e45413c30e6faacb760052368896694d63322c213783ca071b1e2b186c2e0"}
Nov 22 04:51:13 crc kubenswrapper[4948]: I1122 04:51:13.308542 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"10bcdeb83b488e25d8fdd90a34d3e765880c41249f4b51d6922e659268909eeb"}
Nov 22 04:51:13 crc kubenswrapper[4948]: I1122 04:51:13.313086 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fw6ph" event={"ID":"f43c81a6-850d-442c-92b2-1c45ab8e32cf","Type":"ContainerStarted","Data":"c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f"}
Nov 22 04:51:14 crc kubenswrapper[4948]: I1122 04:51:14.321554 4948 generic.go:334] "Generic (PLEG): container finished" podID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerID="c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f" exitCode=0
Nov 22 04:51:14 crc kubenswrapper[4948]: I1122 04:51:14.321725 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fw6ph" event={"ID":"f43c81a6-850d-442c-92b2-1c45ab8e32cf","Type":"ContainerDied","Data":"c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f"}
Nov 22 04:51:14 crc kubenswrapper[4948]: I1122 04:51:14.398702 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l8rpk" podStartSLOduration=3.753351911 podStartE2EDuration="2m2.398684231s" podCreationTimestamp="2025-11-22 04:49:12 +0000 UTC" firstStartedPulling="2025-11-22 04:49:13.558811641 +0000 UTC m=+156.244822157" lastFinishedPulling="2025-11-22 04:51:12.204143951 +0000 UTC m=+274.890154477" observedRunningTime="2025-11-22 04:51:14.39633579 +0000 UTC m=+277.082346356" watchObservedRunningTime="2025-11-22 04:51:14.398684231 +0000 UTC m=+277.084694757"
Nov 22 04:51:19 crc kubenswrapper[4948]: I1122 04:51:19.375364 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmtjw" event={"ID":"be825ff5-e561-447a-b1d2-1676b8577454","Type":"ContainerStarted","Data":"4aa7648b1b4825f19dffdd1db647139c9291dceaec7e93ffe9e492ea8a05e749"}
Nov 22 04:51:20 crc kubenswrapper[4948]: I1122 04:51:20.393000 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fw6ph" event={"ID":"f43c81a6-850d-442c-92b2-1c45ab8e32cf","Type":"ContainerStarted","Data":"168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442"}
Nov 22 04:51:20 crc kubenswrapper[4948]: I1122 04:51:20.412131 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fmtjw" podStartSLOduration=5.626037039 podStartE2EDuration="2m10.412110345s" podCreationTimestamp="2025-11-22 04:49:10 +0000 UTC" firstStartedPulling="2025-11-22 04:49:12.528574954 +0000 UTC m=+155.214585470" lastFinishedPulling="2025-11-22 04:51:17.31464823 +0000 UTC m=+280.000658776" observedRunningTime="2025-11-22 04:51:20.409942309 +0000 UTC m=+283.095952845" watchObservedRunningTime="2025-11-22 04:51:20.412110345 +0000 UTC m=+283.098120861"
Nov 22 04:51:20 crc kubenswrapper[4948]: I1122 04:51:20.436531 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fw6ph" podStartSLOduration=3.103229052 podStartE2EDuration="2m10.436507763s" podCreationTimestamp="2025-11-22 04:49:10 +0000 UTC" firstStartedPulling="2025-11-22 04:49:12.542835855 +0000 UTC m=+155.228846371" lastFinishedPulling="2025-11-22 04:51:19.876114526 +0000 UTC m=+282.562125082" observedRunningTime="2025-11-22 04:51:20.434339097 +0000 UTC m=+283.120349633" watchObservedRunningTime="2025-11-22 04:51:20.436507763 +0000 UTC m=+283.122518289"
Nov 22 04:51:20 crc kubenswrapper[4948]: I1122 04:51:20.709444 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fmtjw"
Nov 22 04:51:20 crc kubenswrapper[4948]: I1122 04:51:20.709519 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fmtjw"
Nov 22 04:51:21 crc kubenswrapper[4948]: I1122 04:51:21.375690 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fw6ph"
Nov 22 04:51:21 crc kubenswrapper[4948]: I1122 04:51:21.375957 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fw6ph"
Nov 22 04:51:21 crc kubenswrapper[4948]: I1122 04:51:21.764643 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-fmtjw" podUID="be825ff5-e561-447a-b1d2-1676b8577454" containerName="registry-server" probeResult="failure" output=<
Nov 22 04:51:21 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s
Nov 22 04:51:21 crc kubenswrapper[4948]: >
Nov 22 04:51:22 crc kubenswrapper[4948]: I1122 04:51:22.419660 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-fw6ph" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerName="registry-server" probeResult="failure" output=<
Nov 22 04:51:22 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s
Nov 22 04:51:22 crc kubenswrapper[4948]: >
Nov 22 04:51:22 crc kubenswrapper[4948]: I1122 04:51:22.678124 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l8rpk"
Nov 22 04:51:22 crc kubenswrapper[4948]: I1122 04:51:22.678281 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l8rpk"
Nov 22 04:51:22 crc kubenswrapper[4948]: I1122 04:51:22.725824 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l8rpk"
Nov 22 04:51:23 crc kubenswrapper[4948]: I1122 04:51:23.461094 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l8rpk"
Nov 22 04:51:30 crc kubenswrapper[4948]: I1122 04:51:30.764431 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fmtjw"
Nov 22 04:51:30 crc kubenswrapper[4948]: I1122 04:51:30.811949 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fmtjw"
Nov 22 04:51:31 crc kubenswrapper[4948]: I1122 04:51:31.414852 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fw6ph"
Nov 22 04:51:31 crc kubenswrapper[4948]: I1122 04:51:31.454524 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fw6ph"
Nov 22 04:51:31 crc kubenswrapper[4948]: I1122 04:51:31.991537 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fw6ph"]
Nov 22 04:51:32 crc kubenswrapper[4948]: I1122 04:51:32.455564 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fw6ph" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerName="registry-server" containerID="cri-o://168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442" gracePeriod=2
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.383304 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fw6ph"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.465563 4948 generic.go:334] "Generic (PLEG): container finished" podID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerID="168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442" exitCode=0
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.465605 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fw6ph" event={"ID":"f43c81a6-850d-442c-92b2-1c45ab8e32cf","Type":"ContainerDied","Data":"168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442"}
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.465635 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fw6ph" event={"ID":"f43c81a6-850d-442c-92b2-1c45ab8e32cf","Type":"ContainerDied","Data":"ae057486d21ab643519a2b23047660a82a77939129c0dc8e3ca9f300acf547ec"}
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.465655 4948 scope.go:117] "RemoveContainer" containerID="168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.465693 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fw6ph"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.492026 4948 scope.go:117] "RemoveContainer" containerID="c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.492920 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-catalog-content\") pod \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") "
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.492970 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc94m\" (UniqueName: \"kubernetes.io/projected/f43c81a6-850d-442c-92b2-1c45ab8e32cf-kube-api-access-hc94m\") pod \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") "
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.493025 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-utilities\") pod \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\" (UID: \"f43c81a6-850d-442c-92b2-1c45ab8e32cf\") "
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.493749 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-utilities" (OuterVolumeSpecName: "utilities") pod "f43c81a6-850d-442c-92b2-1c45ab8e32cf" (UID: "f43c81a6-850d-442c-92b2-1c45ab8e32cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.499792 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f43c81a6-850d-442c-92b2-1c45ab8e32cf-kube-api-access-hc94m" (OuterVolumeSpecName: "kube-api-access-hc94m") pod "f43c81a6-850d-442c-92b2-1c45ab8e32cf" (UID: "f43c81a6-850d-442c-92b2-1c45ab8e32cf"). InnerVolumeSpecName "kube-api-access-hc94m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.507337 4948 scope.go:117] "RemoveContainer" containerID="3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.536077 4948 scope.go:117] "RemoveContainer" containerID="168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442"
Nov 22 04:51:33 crc kubenswrapper[4948]: E1122 04:51:33.537188 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442\": container with ID starting with 168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442 not found: ID does not exist" containerID="168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.537257 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442"} err="failed to get container status \"168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442\": rpc error: code = NotFound desc = could not find container \"168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442\": container with ID starting with 168ff7d33b2c0df806030fdf35be6d7598abd2c1b4a8112da0bd5d8acd247442 not found: ID does not exist"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.537452 4948 scope.go:117] "RemoveContainer" containerID="c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f"
Nov 22 04:51:33 crc kubenswrapper[4948]: E1122 04:51:33.538509 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f\": container with ID starting with c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f not found: ID does not exist" containerID="c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.538534 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f"} err="failed to get container status \"c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f\": rpc error: code = NotFound desc = could not find container \"c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f\": container with ID starting with c9556f512a99c039a61c16da2c1c967628501bf42e99ddbed6f0516c0fba120f not found: ID does not exist"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.538552 4948 scope.go:117] "RemoveContainer" containerID="3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b"
Nov 22 04:51:33 crc kubenswrapper[4948]: E1122 04:51:33.538920 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b\": container with ID starting with 3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b not found: ID does not exist" containerID="3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.538959 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b"} err="failed to get container status \"3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b\": rpc error: code = NotFound desc = could not find container \"3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b\": container with ID starting with 3cbf65cbc4bc3b22af65cb478f75c432c1e62b130ed9dcb01a9eae705e5be22b not found: ID does not exist"
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.542203 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f43c81a6-850d-442c-92b2-1c45ab8e32cf" (UID: "f43c81a6-850d-442c-92b2-1c45ab8e32cf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.594229 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.594265 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc94m\" (UniqueName: \"kubernetes.io/projected/f43c81a6-850d-442c-92b2-1c45ab8e32cf-kube-api-access-hc94m\") on node \"crc\" DevicePath \"\""
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.594278 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43c81a6-850d-442c-92b2-1c45ab8e32cf-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.808585 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fw6ph"]
Nov 22 04:51:33 crc kubenswrapper[4948]: I1122 04:51:33.814251 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fw6ph"]
Nov 22 04:51:35 crc kubenswrapper[4948]: I1122 04:51:35.770989 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" path="/var/lib/kubelet/pods/f43c81a6-850d-442c-92b2-1c45ab8e32cf/volumes"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.019487 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fmtjw"]
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.020245 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fmtjw" podUID="be825ff5-e561-447a-b1d2-1676b8577454" containerName="registry-server" containerID="cri-o://4aa7648b1b4825f19dffdd1db647139c9291dceaec7e93ffe9e492ea8a05e749" gracePeriod=30
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.028841 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lrv8b"]
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.029271 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lrv8b" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" containerName="registry-server" containerID="cri-o://89277f07ab60b383630325a1fc44631f96a96c60b0b421ae3cd3d86754b71ab7" gracePeriod=30
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.034405 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bcq8w"]
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.034658 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" podUID="aaf64a47-9260-4ab8-83da-238e80d4965b" containerName="marketplace-operator" containerID="cri-o://d382ae16834140974abe93b2a51f058f31064cc21c0dce498c940e58c3895666" gracePeriod=30
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.038662 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8rpk"]
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.038966 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l8rpk" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerName="registry-server" containerID="cri-o://617e45413c30e6faacb760052368896694d63322c213783ca071b1e2b186c2e0" gracePeriod=30
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.048323 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5lglc"]
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.048617 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5lglc" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerName="registry-server" containerID="cri-o://ad0c40a6841e27524254edc91c06f25cbfa92b2f3c85c7032dfb362efc400802" gracePeriod=30
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057350 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-n57dg"]
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057671 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057689 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057714 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce871a4d-4584-4ed4-9de5-efb3afa1692b" containerName="pruner"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057722 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce871a4d-4584-4ed4-9de5-efb3afa1692b" containerName="pruner"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057742 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerName="extract-content"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057753 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerName="extract-content"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057767 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="extract-utilities"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057775 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="extract-utilities"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057790 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057799 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057810 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057818 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057832 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="extract-content"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057840 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="extract-content"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057852 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057861 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057873 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="extract-utilities"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057881 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="extract-utilities"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057893 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="extract-utilities"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057901 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="extract-utilities"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057911 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="extract-content"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057919 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="extract-content"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057932 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="extract-content"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057940 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="extract-content"
Nov 22 04:52:06 crc kubenswrapper[4948]: E1122 04:52:06.057953 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerName="extract-utilities"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.057962 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerName="extract-utilities"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.058073 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f43c81a6-850d-442c-92b2-1c45ab8e32cf" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.058087 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="268b6b3d-a26e-4d86-9ef9-dba7dc5478cd" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.058105 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce871a4d-4584-4ed4-9de5-efb3afa1692b" containerName="pruner"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.058122 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="94c90684-8f9b-4922-ad0b-1fa3b77d0767" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.058131 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ba86786-2cde-4556-949f-f0c07fb471ec" containerName="registry-server"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.058606 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.074865 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-n57dg"]
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.176584 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9af9a2c0-1d8b-47e3-bc8a-5573b25fb786-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-n57dg\" (UID: \"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786\") " pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.176637 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9af9a2c0-1d8b-47e3-bc8a-5573b25fb786-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-n57dg\" (UID: \"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786\") " pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.176733 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tckqm\" (UniqueName: \"kubernetes.io/projected/9af9a2c0-1d8b-47e3-bc8a-5573b25fb786-kube-api-access-tckqm\") pod \"marketplace-operator-79b997595-n57dg\" (UID: \"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786\") " pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.277765 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tckqm\" (UniqueName: \"kubernetes.io/projected/9af9a2c0-1d8b-47e3-bc8a-5573b25fb786-kube-api-access-tckqm\") pod \"marketplace-operator-79b997595-n57dg\" (UID: \"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786\") " pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.277838 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9af9a2c0-1d8b-47e3-bc8a-5573b25fb786-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-n57dg\" (UID: \"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786\") " pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.277875 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9af9a2c0-1d8b-47e3-bc8a-5573b25fb786-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-n57dg\" (UID: \"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786\") " pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.279331 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9af9a2c0-1d8b-47e3-bc8a-5573b25fb786-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-n57dg\" (UID: \"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786\") " pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.283921 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9af9a2c0-1d8b-47e3-bc8a-5573b25fb786-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-n57dg\" (UID: \"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786\") " pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.296198 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tckqm\" (UniqueName: \"kubernetes.io/projected/9af9a2c0-1d8b-47e3-bc8a-5573b25fb786-kube-api-access-tckqm\") pod \"marketplace-operator-79b997595-n57dg\" (UID: \"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786\") " pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.379582 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.658584 4948 generic.go:334] "Generic (PLEG): container finished" podID="aaf64a47-9260-4ab8-83da-238e80d4965b" containerID="d382ae16834140974abe93b2a51f058f31064cc21c0dce498c940e58c3895666" exitCode=0
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.658661 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" event={"ID":"aaf64a47-9260-4ab8-83da-238e80d4965b","Type":"ContainerDied","Data":"d382ae16834140974abe93b2a51f058f31064cc21c0dce498c940e58c3895666"}
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.662866 4948 generic.go:334] "Generic (PLEG): container finished" podID="04278e85-8ed2-4820-adbf-ee745fc13337" containerID="89277f07ab60b383630325a1fc44631f96a96c60b0b421ae3cd3d86754b71ab7" exitCode=0
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.662900 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrv8b" event={"ID":"04278e85-8ed2-4820-adbf-ee745fc13337","Type":"ContainerDied","Data":"89277f07ab60b383630325a1fc44631f96a96c60b0b421ae3cd3d86754b71ab7"}
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.664728 4948 generic.go:334] "Generic (PLEG): container finished" podID="be825ff5-e561-447a-b1d2-1676b8577454" containerID="4aa7648b1b4825f19dffdd1db647139c9291dceaec7e93ffe9e492ea8a05e749" exitCode=0
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.664790 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmtjw" event={"ID":"be825ff5-e561-447a-b1d2-1676b8577454","Type":"ContainerDied","Data":"4aa7648b1b4825f19dffdd1db647139c9291dceaec7e93ffe9e492ea8a05e749"}
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.666318 4948 generic.go:334] "Generic (PLEG): container finished" podID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerID="617e45413c30e6faacb760052368896694d63322c213783ca071b1e2b186c2e0" exitCode=0
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.666375 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8rpk" event={"ID":"74a8814b-851c-4d5f-833b-ca0c87b76f48","Type":"ContainerDied","Data":"617e45413c30e6faacb760052368896694d63322c213783ca071b1e2b186c2e0"}
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.668086 4948 generic.go:334] "Generic (PLEG): container finished" podID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerID="ad0c40a6841e27524254edc91c06f25cbfa92b2f3c85c7032dfb362efc400802" exitCode=0
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.668125 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lglc" event={"ID":"26a59e1e-ef32-4b87-86ae-eb86aadafcad","Type":"ContainerDied","Data":"ad0c40a6841e27524254edc91c06f25cbfa92b2f3c85c7032dfb362efc400802"}
Nov 22 04:52:06 crc kubenswrapper[4948]: I1122 04:52:06.777536 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-n57dg"]
Nov 22 04:52:06 crc kubenswrapper[4948]: W1122 04:52:06.786542 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9af9a2c0_1d8b_47e3_bc8a_5573b25fb786.slice/crio-658ed625489137f45fd538fc8de2784c696d58833451b0417cb07a5ec201bc6d WatchSource:0}: Error finding container 658ed625489137f45fd538fc8de2784c696d58833451b0417cb07a5ec201bc6d: Status 404 returned error can't find the container with id 658ed625489137f45fd538fc8de2784c696d58833451b0417cb07a5ec201bc6d
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.085523 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.126809 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2vbhk"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.191827 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-trusted-ca\") pod \"aaf64a47-9260-4ab8-83da-238e80d4965b\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.191889 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-operator-metrics\") pod \"aaf64a47-9260-4ab8-83da-238e80d4965b\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.191951 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bbss\" (UniqueName: \"kubernetes.io/projected/aaf64a47-9260-4ab8-83da-238e80d4965b-kube-api-access-6bbss\") pod \"aaf64a47-9260-4ab8-83da-238e80d4965b\" (UID: \"aaf64a47-9260-4ab8-83da-238e80d4965b\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.195615 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "aaf64a47-9260-4ab8-83da-238e80d4965b" (UID: "aaf64a47-9260-4ab8-83da-238e80d4965b"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.211335 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "aaf64a47-9260-4ab8-83da-238e80d4965b" (UID: "aaf64a47-9260-4ab8-83da-238e80d4965b"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.216735 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aaf64a47-9260-4ab8-83da-238e80d4965b-kube-api-access-6bbss" (OuterVolumeSpecName: "kube-api-access-6bbss") pod "aaf64a47-9260-4ab8-83da-238e80d4965b" (UID: "aaf64a47-9260-4ab8-83da-238e80d4965b"). InnerVolumeSpecName "kube-api-access-6bbss". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.293642 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bbss\" (UniqueName: \"kubernetes.io/projected/aaf64a47-9260-4ab8-83da-238e80d4965b-kube-api-access-6bbss\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.293676 4948 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.293690 4948 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/aaf64a47-9260-4ab8-83da-238e80d4965b-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.310027 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5lglc"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.315146 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fmtjw"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.331369 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lrv8b"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.337343 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l8rpk"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.495916 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-utilities\") pod \"04278e85-8ed2-4820-adbf-ee745fc13337\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.495995 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jf48\" (UniqueName: \"kubernetes.io/projected/74a8814b-851c-4d5f-833b-ca0c87b76f48-kube-api-access-8jf48\") pod \"74a8814b-851c-4d5f-833b-ca0c87b76f48\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496019 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-utilities\") pod \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496046 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-catalog-content\") pod \"74a8814b-851c-4d5f-833b-ca0c87b76f48\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496069 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4zfc\" (UniqueName: \"kubernetes.io/projected/26a59e1e-ef32-4b87-86ae-eb86aadafcad-kube-api-access-v4zfc\") pod \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496113 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-catalog-content\") pod \"be825ff5-e561-447a-b1d2-1676b8577454\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496173 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2d94\" (UniqueName: \"kubernetes.io/projected/04278e85-8ed2-4820-adbf-ee745fc13337-kube-api-access-h2d94\") pod \"04278e85-8ed2-4820-adbf-ee745fc13337\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496193 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-utilities\") pod \"74a8814b-851c-4d5f-833b-ca0c87b76f48\" (UID: \"74a8814b-851c-4d5f-833b-ca0c87b76f48\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496217 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-catalog-content\") pod \"04278e85-8ed2-4820-adbf-ee745fc13337\" (UID: \"04278e85-8ed2-4820-adbf-ee745fc13337\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496249 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rccdm\" (UniqueName: \"kubernetes.io/projected/be825ff5-e561-447a-b1d2-1676b8577454-kube-api-access-rccdm\") pod \"be825ff5-e561-447a-b1d2-1676b8577454\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496288 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-utilities\") pod \"be825ff5-e561-447a-b1d2-1676b8577454\" (UID: \"be825ff5-e561-447a-b1d2-1676b8577454\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496308 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-catalog-content\") pod \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\" (UID: \"26a59e1e-ef32-4b87-86ae-eb86aadafcad\") "
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.496819 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-utilities" (OuterVolumeSpecName: "utilities") pod "04278e85-8ed2-4820-adbf-ee745fc13337" (UID: "04278e85-8ed2-4820-adbf-ee745fc13337"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.497151 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-utilities" (OuterVolumeSpecName: "utilities") pod "26a59e1e-ef32-4b87-86ae-eb86aadafcad" (UID: "26a59e1e-ef32-4b87-86ae-eb86aadafcad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.497432 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-utilities" (OuterVolumeSpecName: "utilities") pod "be825ff5-e561-447a-b1d2-1676b8577454" (UID: "be825ff5-e561-447a-b1d2-1676b8577454"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.497518 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-utilities" (OuterVolumeSpecName: "utilities") pod "74a8814b-851c-4d5f-833b-ca0c87b76f48" (UID: "74a8814b-851c-4d5f-833b-ca0c87b76f48"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.502625 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26a59e1e-ef32-4b87-86ae-eb86aadafcad-kube-api-access-v4zfc" (OuterVolumeSpecName: "kube-api-access-v4zfc") pod "26a59e1e-ef32-4b87-86ae-eb86aadafcad" (UID: "26a59e1e-ef32-4b87-86ae-eb86aadafcad"). InnerVolumeSpecName "kube-api-access-v4zfc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.502658 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04278e85-8ed2-4820-adbf-ee745fc13337-kube-api-access-h2d94" (OuterVolumeSpecName: "kube-api-access-h2d94") pod "04278e85-8ed2-4820-adbf-ee745fc13337" (UID: "04278e85-8ed2-4820-adbf-ee745fc13337"). InnerVolumeSpecName "kube-api-access-h2d94". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.502711 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74a8814b-851c-4d5f-833b-ca0c87b76f48-kube-api-access-8jf48" (OuterVolumeSpecName: "kube-api-access-8jf48") pod "74a8814b-851c-4d5f-833b-ca0c87b76f48" (UID: "74a8814b-851c-4d5f-833b-ca0c87b76f48"). InnerVolumeSpecName "kube-api-access-8jf48". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.516858 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be825ff5-e561-447a-b1d2-1676b8577454-kube-api-access-rccdm" (OuterVolumeSpecName: "kube-api-access-rccdm") pod "be825ff5-e561-447a-b1d2-1676b8577454" (UID: "be825ff5-e561-447a-b1d2-1676b8577454"). InnerVolumeSpecName "kube-api-access-rccdm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.526848 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "74a8814b-851c-4d5f-833b-ca0c87b76f48" (UID: "74a8814b-851c-4d5f-833b-ca0c87b76f48"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.575213 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "04278e85-8ed2-4820-adbf-ee745fc13337" (UID: "04278e85-8ed2-4820-adbf-ee745fc13337"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.592595 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "be825ff5-e561-447a-b1d2-1676b8577454" (UID: "be825ff5-e561-447a-b1d2-1676b8577454"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597121 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597160 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597170 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jf48\" (UniqueName: \"kubernetes.io/projected/74a8814b-851c-4d5f-833b-ca0c87b76f48-kube-api-access-8jf48\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597182 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597191 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597199 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4zfc\" (UniqueName: \"kubernetes.io/projected/26a59e1e-ef32-4b87-86ae-eb86aadafcad-kube-api-access-v4zfc\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597207 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be825ff5-e561-447a-b1d2-1676b8577454-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597218 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2d94\" (UniqueName: \"kubernetes.io/projected/04278e85-8ed2-4820-adbf-ee745fc13337-kube-api-access-h2d94\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597225 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/74a8814b-851c-4d5f-833b-ca0c87b76f48-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597233 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04278e85-8ed2-4820-adbf-ee745fc13337-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.597241 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rccdm\" (UniqueName: \"kubernetes.io/projected/be825ff5-e561-447a-b1d2-1676b8577454-kube-api-access-rccdm\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.628563 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "26a59e1e-ef32-4b87-86ae-eb86aadafcad" (UID: "26a59e1e-ef32-4b87-86ae-eb86aadafcad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.674685 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-n57dg" event={"ID":"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786","Type":"ContainerStarted","Data":"a34846780fa3ea89baff85ac23ec3d340402575a3fccf59989fa12bab3dce4c8"}
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.674788 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-n57dg" event={"ID":"9af9a2c0-1d8b-47e3-bc8a-5573b25fb786","Type":"ContainerStarted","Data":"658ed625489137f45fd538fc8de2784c696d58833451b0417cb07a5ec201bc6d"}
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.674816 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-n57dg"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.676207 4948 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-n57dg container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.54:8080/healthz\": dial tcp 10.217.0.54:8080: connect: connection refused" start-of-body=
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.676248 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-n57dg" podUID="9af9a2c0-1d8b-47e3-bc8a-5573b25fb786" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.54:8080/healthz\": dial tcp 10.217.0.54:8080: connect: connection refused"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.676299 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.676322 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bcq8w" event={"ID":"aaf64a47-9260-4ab8-83da-238e80d4965b","Type":"ContainerDied","Data":"5b6191cdc488524c9206a37c1caa26159211ca15b85a0315f3645fe4f7e6de74"}
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.677002 4948 scope.go:117] "RemoveContainer" containerID="d382ae16834140974abe93b2a51f058f31064cc21c0dce498c940e58c3895666"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.680936 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrv8b" event={"ID":"04278e85-8ed2-4820-adbf-ee745fc13337","Type":"ContainerDied","Data":"a816d194113fd1d936ab348047e0236370dfe145c8053219737a15a2583ecb90"}
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.680972 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lrv8b"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.682972 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmtjw" event={"ID":"be825ff5-e561-447a-b1d2-1676b8577454","Type":"ContainerDied","Data":"577800ad28cec6f0d0d8e23515fbbdfe66c4b5ff2ebfa9236ade21f5e266bda9"}
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.683072 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fmtjw"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.686574 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l8rpk" event={"ID":"74a8814b-851c-4d5f-833b-ca0c87b76f48","Type":"ContainerDied","Data":"6d6880097b36956b5a04a11a9ad4475b3edbb78da6c5cd409bcf889d4f372461"}
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.686637 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l8rpk"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.696581 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lglc" event={"ID":"26a59e1e-ef32-4b87-86ae-eb86aadafcad","Type":"ContainerDied","Data":"4cbbda81a4b21aefcb0bb7513cb9426d0f30ba3709c15ead6609bdda9efe3824"}
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.696683 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5lglc"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.697902 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-n57dg" podStartSLOduration=1.696012412 podStartE2EDuration="1.696012412s" podCreationTimestamp="2025-11-22 04:52:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:52:07.695255929 +0000 UTC m=+330.381266445" watchObservedRunningTime="2025-11-22 04:52:07.696012412 +0000 UTC m=+330.382022928"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.698411 4948 scope.go:117] "RemoveContainer" containerID="89277f07ab60b383630325a1fc44631f96a96c60b0b421ae3cd3d86754b71ab7"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.699389 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26a59e1e-ef32-4b87-86ae-eb86aadafcad-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.719118 4948 scope.go:117] "RemoveContainer" containerID="6ffd9e88352d3186cf2464995e5bb4f758f84a26eb94b71be6cbcd2491ef4249"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.732429 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bcq8w"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.744853 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bcq8w"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.751822 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lrv8b"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.760139 4948 scope.go:117] "RemoveContainer" containerID="5f1a546be0762a2fc27683e4ecaa0004f251d8b3e3c7e0106a7ccc1f8e8ec711"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.768234 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aaf64a47-9260-4ab8-83da-238e80d4965b" path="/var/lib/kubelet/pods/aaf64a47-9260-4ab8-83da-238e80d4965b/volumes"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.771039 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lrv8b"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.771087 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8rpk"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.772668 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l8rpk"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.774966 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fmtjw"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.782728 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fmtjw"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.784677 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5lglc"]
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.785632 4948 scope.go:117] "RemoveContainer" containerID="4aa7648b1b4825f19dffdd1db647139c9291dceaec7e93ffe9e492ea8a05e749"
Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.786916 4948
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5lglc"] Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.805333 4948 scope.go:117] "RemoveContainer" containerID="9796702b61c9fd209f765cc9b61e0348de70fec2ab977433723813fadc09377b" Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.819521 4948 scope.go:117] "RemoveContainer" containerID="d5aa49eb4bd0841e6b29ab0913a610fceec6b9fe2ee31f1c3fd8541b4931e750" Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.837094 4948 scope.go:117] "RemoveContainer" containerID="617e45413c30e6faacb760052368896694d63322c213783ca071b1e2b186c2e0" Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.857974 4948 scope.go:117] "RemoveContainer" containerID="c39acc48fc145dad4964b9919ccb9b6b623cc3a27af3a1a54c4709ea8102422b" Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.882550 4948 scope.go:117] "RemoveContainer" containerID="6b20a32128fd78fb2942e9dbec98b174919f15a693113ea4ef315fef40aacea6" Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.896632 4948 scope.go:117] "RemoveContainer" containerID="ad0c40a6841e27524254edc91c06f25cbfa92b2f3c85c7032dfb362efc400802" Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.917352 4948 scope.go:117] "RemoveContainer" containerID="aa1be7aee560dc39f2ef522869b54e02fab53dad0aa2b6db9904d05289c43108" Nov 22 04:52:07 crc kubenswrapper[4948]: I1122 04:52:07.934433 4948 scope.go:117] "RemoveContainer" containerID="c458ea9af7e8004240ad8e5eed277cd325ab39584477426f70558e4b4ec85bc8" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.630919 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tqk2v"] Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631126 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be825ff5-e561-447a-b1d2-1676b8577454" containerName="extract-content" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631140 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="be825ff5-e561-447a-b1d2-1676b8577454" containerName="extract-content" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631157 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631165 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631174 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaf64a47-9260-4ab8-83da-238e80d4965b" containerName="marketplace-operator" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631181 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaf64a47-9260-4ab8-83da-238e80d4965b" containerName="marketplace-operator" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631192 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" containerName="extract-utilities" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631198 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" containerName="extract-utilities" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631207 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerName="extract-utilities" Nov 22 04:52:08 crc kubenswrapper[4948]: 
I1122 04:52:08.631215 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerName="extract-utilities" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631223 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631231 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631243 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" containerName="extract-content" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631253 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" containerName="extract-content" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631266 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerName="extract-utilities" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631273 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerName="extract-utilities" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631281 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be825ff5-e561-447a-b1d2-1676b8577454" containerName="extract-utilities" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631289 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="be825ff5-e561-447a-b1d2-1676b8577454" containerName="extract-utilities" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631298 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be825ff5-e561-447a-b1d2-1676b8577454" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631305 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="be825ff5-e561-447a-b1d2-1676b8577454" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631316 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerName="extract-content" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631325 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerName="extract-content" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631338 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631344 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: E1122 04:52:08.631354 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerName="extract-content" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631361 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerName="extract-content" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631480 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="aaf64a47-9260-4ab8-83da-238e80d4965b" containerName="marketplace-operator" Nov 22 
04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631500 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631511 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="be825ff5-e561-447a-b1d2-1676b8577454" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631521 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.631530 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" containerName="registry-server" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.632281 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.634522 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.640133 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tqk2v"] Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.709886 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-n57dg" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.731562 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcb0a74e-a042-44d5-86ad-8a04e2b1fd69-utilities\") pod \"redhat-operators-tqk2v\" (UID: \"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69\") " pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.731619 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcb0a74e-a042-44d5-86ad-8a04e2b1fd69-catalog-content\") pod \"redhat-operators-tqk2v\" (UID: \"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69\") " pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.731667 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpxk7\" (UniqueName: \"kubernetes.io/projected/dcb0a74e-a042-44d5-86ad-8a04e2b1fd69-kube-api-access-jpxk7\") pod \"redhat-operators-tqk2v\" (UID: \"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69\") " pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.832405 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcb0a74e-a042-44d5-86ad-8a04e2b1fd69-utilities\") pod \"redhat-operators-tqk2v\" (UID: \"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69\") " pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.832617 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcb0a74e-a042-44d5-86ad-8a04e2b1fd69-catalog-content\") pod \"redhat-operators-tqk2v\" (UID: \"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69\") " 
pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.832676 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpxk7\" (UniqueName: \"kubernetes.io/projected/dcb0a74e-a042-44d5-86ad-8a04e2b1fd69-kube-api-access-jpxk7\") pod \"redhat-operators-tqk2v\" (UID: \"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69\") " pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.832817 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcb0a74e-a042-44d5-86ad-8a04e2b1fd69-utilities\") pod \"redhat-operators-tqk2v\" (UID: \"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69\") " pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.833111 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcb0a74e-a042-44d5-86ad-8a04e2b1fd69-catalog-content\") pod \"redhat-operators-tqk2v\" (UID: \"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69\") " pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.850214 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpxk7\" (UniqueName: \"kubernetes.io/projected/dcb0a74e-a042-44d5-86ad-8a04e2b1fd69-kube-api-access-jpxk7\") pod \"redhat-operators-tqk2v\" (UID: \"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69\") " pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:08 crc kubenswrapper[4948]: I1122 04:52:08.951740 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:09 crc kubenswrapper[4948]: I1122 04:52:09.146022 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tqk2v"] Nov 22 04:52:09 crc kubenswrapper[4948]: I1122 04:52:09.713441 4948 generic.go:334] "Generic (PLEG): container finished" podID="dcb0a74e-a042-44d5-86ad-8a04e2b1fd69" containerID="0afd29f74f43a62e38273d80798a657d902fe3fa08b728852dccfdffd11359d3" exitCode=0 Nov 22 04:52:09 crc kubenswrapper[4948]: I1122 04:52:09.713570 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqk2v" event={"ID":"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69","Type":"ContainerDied","Data":"0afd29f74f43a62e38273d80798a657d902fe3fa08b728852dccfdffd11359d3"} Nov 22 04:52:09 crc kubenswrapper[4948]: I1122 04:52:09.713807 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqk2v" event={"ID":"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69","Type":"ContainerStarted","Data":"a58b4dcbadb05928adde7f35c21ec259bba4c49b392c3e4adbdaa017fdfaa56c"} Nov 22 04:52:09 crc kubenswrapper[4948]: I1122 04:52:09.767212 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04278e85-8ed2-4820-adbf-ee745fc13337" path="/var/lib/kubelet/pods/04278e85-8ed2-4820-adbf-ee745fc13337/volumes" Nov 22 04:52:09 crc kubenswrapper[4948]: I1122 04:52:09.768271 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26a59e1e-ef32-4b87-86ae-eb86aadafcad" path="/var/lib/kubelet/pods/26a59e1e-ef32-4b87-86ae-eb86aadafcad/volumes" Nov 22 04:52:09 crc kubenswrapper[4948]: I1122 04:52:09.769671 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74a8814b-851c-4d5f-833b-ca0c87b76f48" 
path="/var/lib/kubelet/pods/74a8814b-851c-4d5f-833b-ca0c87b76f48/volumes" Nov 22 04:52:09 crc kubenswrapper[4948]: I1122 04:52:09.772449 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be825ff5-e561-447a-b1d2-1676b8577454" path="/var/lib/kubelet/pods/be825ff5-e561-447a-b1d2-1676b8577454/volumes" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.033693 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nm2b5"] Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.034630 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.036832 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.044104 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nm2b5"] Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.149443 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c2126e6-e955-406b-b623-e9b991c5fa40-catalog-content\") pod \"community-operators-nm2b5\" (UID: \"5c2126e6-e955-406b-b623-e9b991c5fa40\") " pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.149621 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mzfh\" (UniqueName: \"kubernetes.io/projected/5c2126e6-e955-406b-b623-e9b991c5fa40-kube-api-access-2mzfh\") pod \"community-operators-nm2b5\" (UID: \"5c2126e6-e955-406b-b623-e9b991c5fa40\") " pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.149655 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c2126e6-e955-406b-b623-e9b991c5fa40-utilities\") pod \"community-operators-nm2b5\" (UID: \"5c2126e6-e955-406b-b623-e9b991c5fa40\") " pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.250745 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mzfh\" (UniqueName: \"kubernetes.io/projected/5c2126e6-e955-406b-b623-e9b991c5fa40-kube-api-access-2mzfh\") pod \"community-operators-nm2b5\" (UID: \"5c2126e6-e955-406b-b623-e9b991c5fa40\") " pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.251389 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c2126e6-e955-406b-b623-e9b991c5fa40-utilities\") pod \"community-operators-nm2b5\" (UID: \"5c2126e6-e955-406b-b623-e9b991c5fa40\") " pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.251639 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c2126e6-e955-406b-b623-e9b991c5fa40-catalog-content\") pod \"community-operators-nm2b5\" (UID: \"5c2126e6-e955-406b-b623-e9b991c5fa40\") " pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.251889 
4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c2126e6-e955-406b-b623-e9b991c5fa40-utilities\") pod \"community-operators-nm2b5\" (UID: \"5c2126e6-e955-406b-b623-e9b991c5fa40\") " pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.252287 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c2126e6-e955-406b-b623-e9b991c5fa40-catalog-content\") pod \"community-operators-nm2b5\" (UID: \"5c2126e6-e955-406b-b623-e9b991c5fa40\") " pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.268247 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mzfh\" (UniqueName: \"kubernetes.io/projected/5c2126e6-e955-406b-b623-e9b991c5fa40-kube-api-access-2mzfh\") pod \"community-operators-nm2b5\" (UID: \"5c2126e6-e955-406b-b623-e9b991c5fa40\") " pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.352019 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.570923 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nm2b5"] Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.719747 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nm2b5" event={"ID":"5c2126e6-e955-406b-b623-e9b991c5fa40","Type":"ContainerStarted","Data":"d5803400e2cc8fa79ef63c3aa50ed4e98a48c8a2b779f028d2d686144c94cba2"} Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.719788 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nm2b5" event={"ID":"5c2126e6-e955-406b-b623-e9b991c5fa40","Type":"ContainerStarted","Data":"f4fc924984158b8c20ce5c515083fe707882f1b037880250e50b90d5ac70ced9"} Nov 22 04:52:10 crc kubenswrapper[4948]: I1122 04:52:10.722009 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqk2v" event={"ID":"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69","Type":"ContainerStarted","Data":"e636d3df44c649412d275c9a74bf977c9350ac08bc010c57a0fa67507457e1fa"} Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.030270 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2gxgn"] Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.031562 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.034189 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.039980 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2gxgn"] Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.161810 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a270484-f75f-485a-9a16-26782de80ed1-utilities\") pod \"certified-operators-2gxgn\" (UID: \"4a270484-f75f-485a-9a16-26782de80ed1\") " pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.162201 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a270484-f75f-485a-9a16-26782de80ed1-catalog-content\") pod \"certified-operators-2gxgn\" (UID: \"4a270484-f75f-485a-9a16-26782de80ed1\") " pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.162249 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4wg9\" (UniqueName: \"kubernetes.io/projected/4a270484-f75f-485a-9a16-26782de80ed1-kube-api-access-l4wg9\") pod \"certified-operators-2gxgn\" (UID: \"4a270484-f75f-485a-9a16-26782de80ed1\") " pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.263619 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a270484-f75f-485a-9a16-26782de80ed1-utilities\") pod \"certified-operators-2gxgn\" (UID: \"4a270484-f75f-485a-9a16-26782de80ed1\") " pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.264030 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a270484-f75f-485a-9a16-26782de80ed1-catalog-content\") pod \"certified-operators-2gxgn\" (UID: \"4a270484-f75f-485a-9a16-26782de80ed1\") " pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.264160 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4wg9\" (UniqueName: \"kubernetes.io/projected/4a270484-f75f-485a-9a16-26782de80ed1-kube-api-access-l4wg9\") pod \"certified-operators-2gxgn\" (UID: \"4a270484-f75f-485a-9a16-26782de80ed1\") " pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.264286 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a270484-f75f-485a-9a16-26782de80ed1-utilities\") pod \"certified-operators-2gxgn\" (UID: \"4a270484-f75f-485a-9a16-26782de80ed1\") " pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.264920 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a270484-f75f-485a-9a16-26782de80ed1-catalog-content\") pod \"certified-operators-2gxgn\" (UID: 
\"4a270484-f75f-485a-9a16-26782de80ed1\") " pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.288763 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4wg9\" (UniqueName: \"kubernetes.io/projected/4a270484-f75f-485a-9a16-26782de80ed1-kube-api-access-l4wg9\") pod \"certified-operators-2gxgn\" (UID: \"4a270484-f75f-485a-9a16-26782de80ed1\") " pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.379014 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.611258 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2gxgn"] Nov 22 04:52:11 crc kubenswrapper[4948]: W1122 04:52:11.631413 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a270484_f75f_485a_9a16_26782de80ed1.slice/crio-83f861c92f2f2e131792adc9090c3bad1088f3f7053b113bc1301d655962b1ab WatchSource:0}: Error finding container 83f861c92f2f2e131792adc9090c3bad1088f3f7053b113bc1301d655962b1ab: Status 404 returned error can't find the container with id 83f861c92f2f2e131792adc9090c3bad1088f3f7053b113bc1301d655962b1ab Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.728564 4948 generic.go:334] "Generic (PLEG): container finished" podID="dcb0a74e-a042-44d5-86ad-8a04e2b1fd69" containerID="e636d3df44c649412d275c9a74bf977c9350ac08bc010c57a0fa67507457e1fa" exitCode=0 Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.728645 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqk2v" event={"ID":"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69","Type":"ContainerDied","Data":"e636d3df44c649412d275c9a74bf977c9350ac08bc010c57a0fa67507457e1fa"} Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.729518 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gxgn" event={"ID":"4a270484-f75f-485a-9a16-26782de80ed1","Type":"ContainerStarted","Data":"83f861c92f2f2e131792adc9090c3bad1088f3f7053b113bc1301d655962b1ab"} Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.730872 4948 generic.go:334] "Generic (PLEG): container finished" podID="5c2126e6-e955-406b-b623-e9b991c5fa40" containerID="d5803400e2cc8fa79ef63c3aa50ed4e98a48c8a2b779f028d2d686144c94cba2" exitCode=0 Nov 22 04:52:11 crc kubenswrapper[4948]: I1122 04:52:11.730926 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nm2b5" event={"ID":"5c2126e6-e955-406b-b623-e9b991c5fa40","Type":"ContainerDied","Data":"d5803400e2cc8fa79ef63c3aa50ed4e98a48c8a2b779f028d2d686144c94cba2"} Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.433510 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5z7w4"] Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.435274 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.437770 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.444671 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5z7w4"] Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.486924 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrkg5\" (UniqueName: \"kubernetes.io/projected/a5e26833-5534-41ea-abff-eddb319d4ca2-kube-api-access-rrkg5\") pod \"redhat-marketplace-5z7w4\" (UID: \"a5e26833-5534-41ea-abff-eddb319d4ca2\") " pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.486991 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5e26833-5534-41ea-abff-eddb319d4ca2-utilities\") pod \"redhat-marketplace-5z7w4\" (UID: \"a5e26833-5534-41ea-abff-eddb319d4ca2\") " pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.487030 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5e26833-5534-41ea-abff-eddb319d4ca2-catalog-content\") pod \"redhat-marketplace-5z7w4\" (UID: \"a5e26833-5534-41ea-abff-eddb319d4ca2\") " pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.588322 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrkg5\" (UniqueName: \"kubernetes.io/projected/a5e26833-5534-41ea-abff-eddb319d4ca2-kube-api-access-rrkg5\") pod \"redhat-marketplace-5z7w4\" (UID: \"a5e26833-5534-41ea-abff-eddb319d4ca2\") " pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.588443 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5e26833-5534-41ea-abff-eddb319d4ca2-utilities\") pod \"redhat-marketplace-5z7w4\" (UID: \"a5e26833-5534-41ea-abff-eddb319d4ca2\") " pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.588545 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5e26833-5534-41ea-abff-eddb319d4ca2-catalog-content\") pod \"redhat-marketplace-5z7w4\" (UID: \"a5e26833-5534-41ea-abff-eddb319d4ca2\") " pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.589285 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5e26833-5534-41ea-abff-eddb319d4ca2-utilities\") pod \"redhat-marketplace-5z7w4\" (UID: \"a5e26833-5534-41ea-abff-eddb319d4ca2\") " pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.589309 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5e26833-5534-41ea-abff-eddb319d4ca2-catalog-content\") pod \"redhat-marketplace-5z7w4\" (UID: 
\"a5e26833-5534-41ea-abff-eddb319d4ca2\") " pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.611595 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrkg5\" (UniqueName: \"kubernetes.io/projected/a5e26833-5534-41ea-abff-eddb319d4ca2-kube-api-access-rrkg5\") pod \"redhat-marketplace-5z7w4\" (UID: \"a5e26833-5534-41ea-abff-eddb319d4ca2\") " pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.738350 4948 generic.go:334] "Generic (PLEG): container finished" podID="5c2126e6-e955-406b-b623-e9b991c5fa40" containerID="252db67cf219238d999946227ab2de8ed2c9d5d541d017b17e8ba9d93847566f" exitCode=0 Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.738424 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nm2b5" event={"ID":"5c2126e6-e955-406b-b623-e9b991c5fa40","Type":"ContainerDied","Data":"252db67cf219238d999946227ab2de8ed2c9d5d541d017b17e8ba9d93847566f"} Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.742597 4948 generic.go:334] "Generic (PLEG): container finished" podID="4a270484-f75f-485a-9a16-26782de80ed1" containerID="3be5128080751b620d68bb59539d905c1daefeeaf9c359ffa155a028a57cafd3" exitCode=0 Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.742746 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gxgn" event={"ID":"4a270484-f75f-485a-9a16-26782de80ed1","Type":"ContainerDied","Data":"3be5128080751b620d68bb59539d905c1daefeeaf9c359ffa155a028a57cafd3"} Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.746448 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqk2v" event={"ID":"dcb0a74e-a042-44d5-86ad-8a04e2b1fd69","Type":"ContainerStarted","Data":"0b8f87a019ce8623af0473d94582c319d7dc28be7b7fb871e13efc5d15c4c598"} Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.755580 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.778495 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tqk2v" podStartSLOduration=2.312085473 podStartE2EDuration="4.778439631s" podCreationTimestamp="2025-11-22 04:52:08 +0000 UTC" firstStartedPulling="2025-11-22 04:52:09.714898786 +0000 UTC m=+332.400909302" lastFinishedPulling="2025-11-22 04:52:12.181252944 +0000 UTC m=+334.867263460" observedRunningTime="2025-11-22 04:52:12.775404208 +0000 UTC m=+335.461414764" watchObservedRunningTime="2025-11-22 04:52:12.778439631 +0000 UTC m=+335.464450177" Nov 22 04:52:12 crc kubenswrapper[4948]: I1122 04:52:12.978446 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5z7w4"] Nov 22 04:52:13 crc kubenswrapper[4948]: I1122 04:52:13.753595 4948 generic.go:334] "Generic (PLEG): container finished" podID="a5e26833-5534-41ea-abff-eddb319d4ca2" containerID="2996d66f68efbacde3b5bd0d858d6cf9addd533b769a574bf8a69ea1129cff1e" exitCode=0 Nov 22 04:52:13 crc kubenswrapper[4948]: I1122 04:52:13.753696 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7w4" event={"ID":"a5e26833-5534-41ea-abff-eddb319d4ca2","Type":"ContainerDied","Data":"2996d66f68efbacde3b5bd0d858d6cf9addd533b769a574bf8a69ea1129cff1e"} Nov 22 04:52:13 crc kubenswrapper[4948]: I1122 04:52:13.754078 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7w4" event={"ID":"a5e26833-5534-41ea-abff-eddb319d4ca2","Type":"ContainerStarted","Data":"b935aa42743aa04e9f02e8e41346f4b692bcd27d00921b2d12ce323a8dc35ed4"} Nov 22 04:52:13 crc kubenswrapper[4948]: I1122 04:52:13.756340 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nm2b5" event={"ID":"5c2126e6-e955-406b-b623-e9b991c5fa40","Type":"ContainerStarted","Data":"204e2e9411f4da7c3337212074ae1b46e2593ce8ec0192a620dd99a00bb88abf"} Nov 22 04:52:14 crc kubenswrapper[4948]: I1122 04:52:14.763635 4948 generic.go:334] "Generic (PLEG): container finished" podID="4a270484-f75f-485a-9a16-26782de80ed1" containerID="fa604b1915b6260bcfa4186e0ca1d7ba709ccf2cd583e91c17dcfe38bcd362d0" exitCode=0 Nov 22 04:52:14 crc kubenswrapper[4948]: I1122 04:52:14.763719 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gxgn" event={"ID":"4a270484-f75f-485a-9a16-26782de80ed1","Type":"ContainerDied","Data":"fa604b1915b6260bcfa4186e0ca1d7ba709ccf2cd583e91c17dcfe38bcd362d0"} Nov 22 04:52:14 crc kubenswrapper[4948]: I1122 04:52:14.780551 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nm2b5" podStartSLOduration=2.308720952 podStartE2EDuration="4.780533679s" podCreationTimestamp="2025-11-22 04:52:10 +0000 UTC" firstStartedPulling="2025-11-22 04:52:10.723199381 +0000 UTC m=+333.409209897" lastFinishedPulling="2025-11-22 04:52:13.195012108 +0000 UTC m=+335.881022624" observedRunningTime="2025-11-22 04:52:13.796770835 +0000 UTC m=+336.482781361" watchObservedRunningTime="2025-11-22 04:52:14.780533679 +0000 UTC m=+337.466544195" Nov 22 04:52:18 crc kubenswrapper[4948]: I1122 04:52:18.784840 4948 generic.go:334] "Generic (PLEG): container finished" podID="a5e26833-5534-41ea-abff-eddb319d4ca2" containerID="37c13cb0428644265a42acef92dd27e05a657401bf122dc9fdd541d668f305b8" 
exitCode=0 Nov 22 04:52:18 crc kubenswrapper[4948]: I1122 04:52:18.785340 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7w4" event={"ID":"a5e26833-5534-41ea-abff-eddb319d4ca2","Type":"ContainerDied","Data":"37c13cb0428644265a42acef92dd27e05a657401bf122dc9fdd541d668f305b8"} Nov 22 04:52:18 crc kubenswrapper[4948]: I1122 04:52:18.791692 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gxgn" event={"ID":"4a270484-f75f-485a-9a16-26782de80ed1","Type":"ContainerStarted","Data":"f6883e8e3018a4176b171efd66240cfccbe8eed91f58940c38eb84a09d62219c"} Nov 22 04:52:18 crc kubenswrapper[4948]: I1122 04:52:18.826896 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2gxgn" podStartSLOduration=5.374354064 podStartE2EDuration="7.826879138s" podCreationTimestamp="2025-11-22 04:52:11 +0000 UTC" firstStartedPulling="2025-11-22 04:52:12.744440458 +0000 UTC m=+335.430450964" lastFinishedPulling="2025-11-22 04:52:15.196965512 +0000 UTC m=+337.882976038" observedRunningTime="2025-11-22 04:52:18.824812435 +0000 UTC m=+341.510822971" watchObservedRunningTime="2025-11-22 04:52:18.826879138 +0000 UTC m=+341.512889644" Nov 22 04:52:18 crc kubenswrapper[4948]: I1122 04:52:18.952226 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:18 crc kubenswrapper[4948]: I1122 04:52:18.952282 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:18 crc kubenswrapper[4948]: I1122 04:52:18.995110 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:19 crc kubenswrapper[4948]: I1122 04:52:19.802569 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7w4" event={"ID":"a5e26833-5534-41ea-abff-eddb319d4ca2","Type":"ContainerStarted","Data":"c91d4f20aaac8ae7a933712da24042f73c00a5d172ae3729a0813d3a4b01364e"} Nov 22 04:52:19 crc kubenswrapper[4948]: I1122 04:52:19.845221 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tqk2v" Nov 22 04:52:19 crc kubenswrapper[4948]: I1122 04:52:19.863199 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5z7w4" podStartSLOduration=2.424127088 podStartE2EDuration="7.863184524s" podCreationTimestamp="2025-11-22 04:52:12 +0000 UTC" firstStartedPulling="2025-11-22 04:52:13.754967373 +0000 UTC m=+336.440977899" lastFinishedPulling="2025-11-22 04:52:19.194024819 +0000 UTC m=+341.880035335" observedRunningTime="2025-11-22 04:52:19.820230887 +0000 UTC m=+342.506241403" watchObservedRunningTime="2025-11-22 04:52:19.863184524 +0000 UTC m=+342.549195040" Nov 22 04:52:20 crc kubenswrapper[4948]: I1122 04:52:20.352510 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:20 crc kubenswrapper[4948]: I1122 04:52:20.352564 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:20 crc kubenswrapper[4948]: I1122 04:52:20.395750 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:20 crc kubenswrapper[4948]: I1122 04:52:20.845549 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nm2b5" Nov 22 04:52:21 crc kubenswrapper[4948]: I1122 04:52:21.379878 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:21 crc kubenswrapper[4948]: I1122 04:52:21.379924 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:21 crc kubenswrapper[4948]: I1122 04:52:21.415209 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:22 crc kubenswrapper[4948]: I1122 04:52:22.760454 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:22 crc kubenswrapper[4948]: I1122 04:52:22.761864 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:22 crc kubenswrapper[4948]: I1122 04:52:22.805744 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:31 crc kubenswrapper[4948]: I1122 04:52:31.432837 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2gxgn" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.179588 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" podUID="656450bf-ce50-4fc8-863e-274359778f85" containerName="oauth-openshift" containerID="cri-o://9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b" gracePeriod=15 Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.602916 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.646758 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7b8b5f6596-zvs26"] Nov 22 04:52:32 crc kubenswrapper[4948]: E1122 04:52:32.647037 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="656450bf-ce50-4fc8-863e-274359778f85" containerName="oauth-openshift" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.647055 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="656450bf-ce50-4fc8-863e-274359778f85" containerName="oauth-openshift" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.647223 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="656450bf-ce50-4fc8-863e-274359778f85" containerName="oauth-openshift" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.647781 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.667015 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7b8b5f6596-zvs26"] Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.686836 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-serving-cert\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.686931 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-idp-0-file-data\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687002 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-login\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687107 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-service-ca\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687180 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-trusted-ca-bundle\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687263 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-error\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687330 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-audit-policies\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687385 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-session\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687429 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/656450bf-ce50-4fc8-863e-274359778f85-audit-dir\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687521 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-router-certs\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687646 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-ocp-branding-template\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687724 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-provider-selection\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687798 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mqbs\" (UniqueName: \"kubernetes.io/projected/656450bf-ce50-4fc8-863e-274359778f85-kube-api-access-9mqbs\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.687861 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-cliconfig\") pod \"656450bf-ce50-4fc8-863e-274359778f85\" (UID: \"656450bf-ce50-4fc8-863e-274359778f85\") " Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688099 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-template-error\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688172 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-audit-policies\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688243 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 
04:52:32.688316 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688373 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688435 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnfdb\" (UniqueName: \"kubernetes.io/projected/145b19ab-3687-460c-9a10-9c6f6e9a548d-kube-api-access-xnfdb\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688534 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-service-ca\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688614 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-session\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688673 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/145b19ab-3687-460c-9a10-9c6f6e9a548d-audit-dir\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688728 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-template-login\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688790 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-router-certs\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " 
pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688847 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688904 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.688988 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.689291 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/656450bf-ce50-4fc8-863e-274359778f85-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.693788 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.695304 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.695798 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.696129 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.704123 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.704622 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.706232 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.706284 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.706670 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.706874 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.707052 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.709848 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.710176 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/656450bf-ce50-4fc8-863e-274359778f85-kube-api-access-9mqbs" (OuterVolumeSpecName: "kube-api-access-9mqbs") pod "656450bf-ce50-4fc8-863e-274359778f85" (UID: "656450bf-ce50-4fc8-863e-274359778f85"). InnerVolumeSpecName "kube-api-access-9mqbs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.790750 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-session\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.790856 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/145b19ab-3687-460c-9a10-9c6f6e9a548d-audit-dir\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.790916 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-template-login\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.790981 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-router-certs\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791038 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: 
\"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791094 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791174 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791248 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-template-error\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791323 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/145b19ab-3687-460c-9a10-9c6f6e9a548d-audit-dir\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791410 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-audit-policies\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791551 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791637 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791700 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: 
\"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791761 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnfdb\" (UniqueName: \"kubernetes.io/projected/145b19ab-3687-460c-9a10-9c6f6e9a548d-kube-api-access-xnfdb\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791827 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-service-ca\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791923 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791958 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.791987 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792016 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792045 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792075 4948 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792103 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792134 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792163 4948 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/656450bf-ce50-4fc8-863e-274359778f85-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792194 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792223 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792253 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mqbs\" (UniqueName: \"kubernetes.io/projected/656450bf-ce50-4fc8-863e-274359778f85-kube-api-access-9mqbs\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792283 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792313 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/656450bf-ce50-4fc8-863e-274359778f85-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792344 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.792386 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-audit-policies\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.793276 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.793696 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-service-ca\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.794664 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-template-login\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.795212 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.795698 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.795732 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-user-template-error\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.797017 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.797237 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.800935 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-session\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.801918 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/145b19ab-3687-460c-9a10-9c6f6e9a548d-v4-0-config-system-router-certs\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.802806 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5z7w4" Nov 22 04:52:32 crc 
kubenswrapper[4948]: I1122 04:52:32.826945 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnfdb\" (UniqueName: \"kubernetes.io/projected/145b19ab-3687-460c-9a10-9c6f6e9a548d-kube-api-access-xnfdb\") pod \"oauth-openshift-7b8b5f6596-zvs26\" (UID: \"145b19ab-3687-460c-9a10-9c6f6e9a548d\") " pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.869232 4948 generic.go:334] "Generic (PLEG): container finished" podID="656450bf-ce50-4fc8-863e-274359778f85" containerID="9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b" exitCode=0 Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.869583 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" event={"ID":"656450bf-ce50-4fc8-863e-274359778f85","Type":"ContainerDied","Data":"9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b"} Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.869693 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" event={"ID":"656450bf-ce50-4fc8-863e-274359778f85","Type":"ContainerDied","Data":"5b6840ed5b8219dbb88bb6f6708337b5b1d36168b6d353ee44b4cd0238e4a279"} Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.869806 4948 scope.go:117] "RemoveContainer" containerID="9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.870007 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2vbhk" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.896624 4948 scope.go:117] "RemoveContainer" containerID="9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b" Nov 22 04:52:32 crc kubenswrapper[4948]: E1122 04:52:32.897595 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b\": container with ID starting with 9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b not found: ID does not exist" containerID="9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.897625 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b"} err="failed to get container status \"9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b\": rpc error: code = NotFound desc = could not find container \"9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b\": container with ID starting with 9bc9f0f8a57ad1ef34bf329ee92c464d9ba06f1675f394626b593fe7f42aae5b not found: ID does not exist" Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.905029 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2vbhk"] Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.909668 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2vbhk"] Nov 22 04:52:32 crc kubenswrapper[4948]: I1122 04:52:32.972030 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:33 crc kubenswrapper[4948]: I1122 04:52:33.376507 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7b8b5f6596-zvs26"] Nov 22 04:52:33 crc kubenswrapper[4948]: I1122 04:52:33.767536 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="656450bf-ce50-4fc8-863e-274359778f85" path="/var/lib/kubelet/pods/656450bf-ce50-4fc8-863e-274359778f85/volumes" Nov 22 04:52:33 crc kubenswrapper[4948]: I1122 04:52:33.876580 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" event={"ID":"145b19ab-3687-460c-9a10-9c6f6e9a548d","Type":"ContainerStarted","Data":"e9065c70641cc840bfb3b565ea57b61ec7bb697a129e8c1e955e0fbab7042b4b"} Nov 22 04:52:33 crc kubenswrapper[4948]: I1122 04:52:33.876960 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" event={"ID":"145b19ab-3687-460c-9a10-9c6f6e9a548d","Type":"ContainerStarted","Data":"85ddd8983b744cb66bc0395f60c9a48e12eb5e9ec0912fed3376526ddc3128f6"} Nov 22 04:52:33 crc kubenswrapper[4948]: I1122 04:52:33.877224 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:52:33 crc kubenswrapper[4948]: I1122 04:52:33.905943 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" podStartSLOduration=26.905923344 podStartE2EDuration="26.905923344s" podCreationTimestamp="2025-11-22 04:52:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:52:33.903991965 +0000 UTC m=+356.590002521" watchObservedRunningTime="2025-11-22 04:52:33.905923344 +0000 UTC m=+356.591933870" Nov 22 04:52:33 crc kubenswrapper[4948]: I1122 04:52:33.951263 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7b8b5f6596-zvs26" Nov 22 04:53:29 crc kubenswrapper[4948]: I1122 04:53:29.789442 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 04:53:29 crc kubenswrapper[4948]: I1122 04:53:29.789993 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.834774 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-8d8pn"] Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.835915 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.854593 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-8d8pn"] Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.982893 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-installation-pull-secrets\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.982959 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-ca-trust-extracted\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.982976 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts29v\" (UniqueName: \"kubernetes.io/projected/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-kube-api-access-ts29v\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.983032 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-trusted-ca\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.983070 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-registry-tls\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.983088 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-bound-sa-token\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.983113 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:45 crc kubenswrapper[4948]: I1122 04:53:45.983139 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-registry-certificates\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.014435 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.084604 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-registry-tls\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.084684 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-bound-sa-token\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.084796 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-registry-certificates\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.084850 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-installation-pull-secrets\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.084901 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-ca-trust-extracted\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.084933 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts29v\" (UniqueName: \"kubernetes.io/projected/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-kube-api-access-ts29v\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.086069 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-ca-trust-extracted\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.086223 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-trusted-ca\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.087081 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-registry-certificates\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.088927 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-trusted-ca\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.092642 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-registry-tls\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.094138 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-installation-pull-secrets\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.106903 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts29v\" (UniqueName: \"kubernetes.io/projected/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-kube-api-access-ts29v\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.113273 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a4eaf6a-378d-4b31-8f91-8f4d589a4dac-bound-sa-token\") pod \"image-registry-66df7c8f76-8d8pn\" (UID: \"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac\") " pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.149997 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:46 crc kubenswrapper[4948]: I1122 04:53:46.381854 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-8d8pn"] Nov 22 04:53:46 crc kubenswrapper[4948]: W1122 04:53:46.387901 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a4eaf6a_378d_4b31_8f91_8f4d589a4dac.slice/crio-2a93cd788f0707f6a261fc23faf9caf54ac0362ff45a8ad949bcaf643ba0c79f WatchSource:0}: Error finding container 2a93cd788f0707f6a261fc23faf9caf54ac0362ff45a8ad949bcaf643ba0c79f: Status 404 returned error can't find the container with id 2a93cd788f0707f6a261fc23faf9caf54ac0362ff45a8ad949bcaf643ba0c79f Nov 22 04:53:47 crc kubenswrapper[4948]: I1122 04:53:47.355029 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" event={"ID":"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac","Type":"ContainerStarted","Data":"0599481b8cbaca31b597ed9094f76c673a0ddbccfde388bb9b2075bfa26a8095"} Nov 22 04:53:47 crc kubenswrapper[4948]: I1122 04:53:47.355554 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" event={"ID":"6a4eaf6a-378d-4b31-8f91-8f4d589a4dac","Type":"ContainerStarted","Data":"2a93cd788f0707f6a261fc23faf9caf54ac0362ff45a8ad949bcaf643ba0c79f"} Nov 22 04:53:47 crc kubenswrapper[4948]: I1122 04:53:47.355599 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:53:47 crc kubenswrapper[4948]: I1122 04:53:47.389604 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" podStartSLOduration=2.389575226 podStartE2EDuration="2.389575226s" podCreationTimestamp="2025-11-22 04:53:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:53:47.385839385 +0000 UTC m=+430.071849971" watchObservedRunningTime="2025-11-22 04:53:47.389575226 +0000 UTC m=+430.075585782" Nov 22 04:53:59 crc kubenswrapper[4948]: I1122 04:53:59.789579 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 04:53:59 crc kubenswrapper[4948]: I1122 04:53:59.791119 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 04:54:06 crc kubenswrapper[4948]: I1122 04:54:06.156643 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-8d8pn" Nov 22 04:54:06 crc kubenswrapper[4948]: I1122 04:54:06.241609 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v78bn"] Nov 22 04:54:29 crc kubenswrapper[4948]: I1122 04:54:29.790315 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 04:54:29 crc kubenswrapper[4948]: I1122 04:54:29.792533 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 04:54:29 crc kubenswrapper[4948]: I1122 04:54:29.792986 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 04:54:29 crc kubenswrapper[4948]: I1122 04:54:29.794736 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"10bcdeb83b488e25d8fdd90a34d3e765880c41249f4b51d6922e659268909eeb"} pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 04:54:29 crc kubenswrapper[4948]: I1122 04:54:29.795151 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" containerID="cri-o://10bcdeb83b488e25d8fdd90a34d3e765880c41249f4b51d6922e659268909eeb" gracePeriod=600 Nov 22 04:54:30 crc kubenswrapper[4948]: I1122 04:54:30.619987 4948 generic.go:334] "Generic (PLEG): container finished" podID="126f010b-a640-4133-b63f-d2976da99215" containerID="10bcdeb83b488e25d8fdd90a34d3e765880c41249f4b51d6922e659268909eeb" exitCode=0 Nov 22 04:54:30 crc kubenswrapper[4948]: I1122 04:54:30.620207 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerDied","Data":"10bcdeb83b488e25d8fdd90a34d3e765880c41249f4b51d6922e659268909eeb"} Nov 22 04:54:30 crc kubenswrapper[4948]: I1122 04:54:30.620282 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"74c12ce3297a891692a52980f92e3b4d67bbfb18ea9a3348de4d0832ad26ef13"} Nov 22 04:54:30 crc kubenswrapper[4948]: I1122 04:54:30.620307 4948 scope.go:117] "RemoveContainer" containerID="4008eacc16d59b14804b5bc1bf31406f0ecbae14fc80ebc69d80e618285acd58" Nov 22 04:54:31 crc kubenswrapper[4948]: I1122 04:54:31.286375 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" podUID="1d2caf26-e32d-412f-b764-a050f5a5840c" containerName="registry" containerID="cri-o://204b97ddba5473e9a01aee50f11629b64ab734abb719953ab55eba84ff303052" gracePeriod=30 Nov 22 04:54:31 crc kubenswrapper[4948]: I1122 04:54:31.370916 4948 patch_prober.go:28] interesting pod/image-registry-697d97f7c8-v78bn container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.29:5000/healthz\": dial tcp 10.217.0.29:5000: connect: connection refused" start-of-body= Nov 22 04:54:31 crc kubenswrapper[4948]: I1122 04:54:31.371237 4948 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" podUID="1d2caf26-e32d-412f-b764-a050f5a5840c" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.29:5000/healthz\": dial tcp 10.217.0.29:5000: connect: connection refused" Nov 22 04:54:31 crc kubenswrapper[4948]: I1122 04:54:31.629906 4948 generic.go:334] "Generic (PLEG): container finished" podID="1d2caf26-e32d-412f-b764-a050f5a5840c" containerID="204b97ddba5473e9a01aee50f11629b64ab734abb719953ab55eba84ff303052" exitCode=0 Nov 22 04:54:31 crc kubenswrapper[4948]: I1122 04:54:31.630030 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" event={"ID":"1d2caf26-e32d-412f-b764-a050f5a5840c","Type":"ContainerDied","Data":"204b97ddba5473e9a01aee50f11629b64ab734abb719953ab55eba84ff303052"} Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.217720 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.403822 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d2caf26-e32d-412f-b764-a050f5a5840c-ca-trust-extracted\") pod \"1d2caf26-e32d-412f-b764-a050f5a5840c\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.403904 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-tls\") pod \"1d2caf26-e32d-412f-b764-a050f5a5840c\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.403953 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d2caf26-e32d-412f-b764-a050f5a5840c-installation-pull-secrets\") pod \"1d2caf26-e32d-412f-b764-a050f5a5840c\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.404275 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"1d2caf26-e32d-412f-b764-a050f5a5840c\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.404329 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kzvh\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-kube-api-access-6kzvh\") pod \"1d2caf26-e32d-412f-b764-a050f5a5840c\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.404371 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-trusted-ca\") pod \"1d2caf26-e32d-412f-b764-a050f5a5840c\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.404459 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-certificates\") pod 
\"1d2caf26-e32d-412f-b764-a050f5a5840c\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.404542 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-bound-sa-token\") pod \"1d2caf26-e32d-412f-b764-a050f5a5840c\" (UID: \"1d2caf26-e32d-412f-b764-a050f5a5840c\") " Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.405580 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "1d2caf26-e32d-412f-b764-a050f5a5840c" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.405786 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "1d2caf26-e32d-412f-b764-a050f5a5840c" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.406120 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.406162 4948 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.413263 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "1d2caf26-e32d-412f-b764-a050f5a5840c" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.414306 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "1d2caf26-e32d-412f-b764-a050f5a5840c" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.415338 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2caf26-e32d-412f-b764-a050f5a5840c-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "1d2caf26-e32d-412f-b764-a050f5a5840c" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.415525 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-kube-api-access-6kzvh" (OuterVolumeSpecName: "kube-api-access-6kzvh") pod "1d2caf26-e32d-412f-b764-a050f5a5840c" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c"). InnerVolumeSpecName "kube-api-access-6kzvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.431620 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "1d2caf26-e32d-412f-b764-a050f5a5840c" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.435085 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d2caf26-e32d-412f-b764-a050f5a5840c-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "1d2caf26-e32d-412f-b764-a050f5a5840c" (UID: "1d2caf26-e32d-412f-b764-a050f5a5840c"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.506791 4948 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.506841 4948 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d2caf26-e32d-412f-b764-a050f5a5840c-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.506860 4948 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.506878 4948 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d2caf26-e32d-412f-b764-a050f5a5840c-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.506901 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kzvh\" (UniqueName: \"kubernetes.io/projected/1d2caf26-e32d-412f-b764-a050f5a5840c-kube-api-access-6kzvh\") on node \"crc\" DevicePath \"\"" Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.639047 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn" event={"ID":"1d2caf26-e32d-412f-b764-a050f5a5840c","Type":"ContainerDied","Data":"c25002b7dfc514ef1199051fab7b220d25bd09cde42ba5ee01923d6e5ae36e87"} Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.639076 4948 util.go:48] "No ready sandbox for pod can be found. 
Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.639076 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v78bn"
Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.639110 4948 scope.go:117] "RemoveContainer" containerID="204b97ddba5473e9a01aee50f11629b64ab734abb719953ab55eba84ff303052"
Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.666065 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v78bn"]
Nov 22 04:54:32 crc kubenswrapper[4948]: I1122 04:54:32.673159 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v78bn"]
Nov 22 04:54:33 crc kubenswrapper[4948]: I1122 04:54:33.763800 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d2caf26-e32d-412f-b764-a050f5a5840c" path="/var/lib/kubelet/pods/1d2caf26-e32d-412f-b764-a050f5a5840c/volumes"
Nov 22 04:56:59 crc kubenswrapper[4948]: I1122 04:56:59.790053 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 04:56:59 crc kubenswrapper[4948]: I1122 04:56:59.790773 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 04:57:29 crc kubenswrapper[4948]: I1122 04:57:29.790030 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 04:57:29 crc kubenswrapper[4948]: I1122 04:57:29.790735 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 04:57:59 crc kubenswrapper[4948]: I1122 04:57:59.790220 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 04:57:59 crc kubenswrapper[4948]: I1122 04:57:59.791082 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 04:57:59 crc kubenswrapper[4948]: I1122 04:57:59.791158 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx"
Nov 22 04:57:59 crc kubenswrapper[4948]: I1122 04:57:59.792042 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"74c12ce3297a891692a52980f92e3b4d67bbfb18ea9a3348de4d0832ad26ef13"} pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 04:57:59 crc kubenswrapper[4948]: I1122 04:57:59.792155 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" containerID="cri-o://74c12ce3297a891692a52980f92e3b4d67bbfb18ea9a3348de4d0832ad26ef13" gracePeriod=600
Nov 22 04:58:00 crc kubenswrapper[4948]: I1122 04:58:00.008238 4948 generic.go:334] "Generic (PLEG): container finished" podID="126f010b-a640-4133-b63f-d2976da99215" containerID="74c12ce3297a891692a52980f92e3b4d67bbfb18ea9a3348de4d0832ad26ef13" exitCode=0
Nov 22 04:58:00 crc kubenswrapper[4948]: I1122 04:58:00.008376 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerDied","Data":"74c12ce3297a891692a52980f92e3b4d67bbfb18ea9a3348de4d0832ad26ef13"}
Nov 22 04:58:00 crc kubenswrapper[4948]: I1122 04:58:00.008808 4948 scope.go:117] "RemoveContainer" containerID="10bcdeb83b488e25d8fdd90a34d3e765880c41249f4b51d6922e659268909eeb"
Nov 22 04:58:01 crc kubenswrapper[4948]: I1122 04:58:01.019926 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"d1063d3de2076e619f45df2f69f8b545ea06fe47defd910d05a953ec8383f798"}
pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovn-acl-logging" containerID="cri-o://2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841" gracePeriod=30 Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.034945 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="kube-rbac-proxy-node" containerID="cri-o://9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9" gracePeriod=30 Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.035259 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="sbdb" containerID="cri-o://58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c" gracePeriod=30 Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.085506 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" containerID="cri-o://450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144" gracePeriod=30 Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.383805 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/3.log" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.386957 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovn-acl-logging/0.log" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.387722 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovn-controller/0.log" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.388405 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452417 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-pp6gd"] Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452674 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452689 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452700 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d2caf26-e32d-412f-b764-a050f5a5840c" containerName="registry" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452707 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d2caf26-e32d-412f-b764-a050f5a5840c" containerName="registry" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452720 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="nbdb" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452728 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="nbdb" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452746 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452754 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452770 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452777 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452787 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="northd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452796 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="northd" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452804 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovn-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452812 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovn-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452824 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="sbdb" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452831 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="sbdb" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452841 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" 
containerName="kube-rbac-proxy-node" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452849 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="kube-rbac-proxy-node" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452865 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452873 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452883 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="kubecfg-setup" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452891 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="kubecfg-setup" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452900 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovn-acl-logging" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452908 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovn-acl-logging" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.452917 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.452925 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453050 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453063 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453074 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453082 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d2caf26-e32d-412f-b764-a050f5a5840c" containerName="registry" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453094 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="nbdb" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453106 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453118 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="kube-rbac-proxy-node" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453132 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovn-acl-logging" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453144 4948 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="sbdb" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453158 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="northd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453174 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453188 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovn-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: E1122 04:58:17.453317 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453331 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.453491 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" containerName="ovnkube-controller" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.455451 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458331 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-kubelet\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458376 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-etc-openvswitch\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458396 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-systemd\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458416 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-config\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458433 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-ovn\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458480 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-var-lib-cni-networks-ovn-kubernetes\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458508 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-node-log\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458522 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-log-socket\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458536 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-openvswitch\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458563 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-script-lib\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458585 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hrcwm\" (UniqueName: \"kubernetes.io/projected/bad3107e-91a9-463d-b981-fb102616bdbe-kube-api-access-hrcwm\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458602 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-systemd-units\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458618 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bad3107e-91a9-463d-b981-fb102616bdbe-ovn-node-metrics-cert\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458635 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-bin\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458652 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-slash\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458678 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" 
(UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-netd\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458698 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-ovn-kubernetes\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458721 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-env-overrides\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458740 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-var-lib-openvswitch\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458764 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-netns\") pod \"bad3107e-91a9-463d-b981-fb102616bdbe\" (UID: \"bad3107e-91a9-463d-b981-fb102616bdbe\") " Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458854 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-cni-bin\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458881 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-run-openvswitch\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458905 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-run-ovn\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458929 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-run-netns\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458946 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-var-lib-cni-networks-ovn-kubernetes\") 
pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458962 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-run-ovn-kubernetes\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458978 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1b1e594d-c826-43d2-a8b3-83c01f9570a6-env-overrides\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.458996 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-cni-netd\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459012 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1b1e594d-c826-43d2-a8b3-83c01f9570a6-ovnkube-script-lib\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459034 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-var-lib-openvswitch\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459061 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-log-socket\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459075 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459090 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-systemd-units\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459164 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-run-systemd\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459168 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-slash" (OuterVolumeSpecName: "host-slash") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459190 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459211 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459213 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459248 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459614 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459637 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459652 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459675 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459897 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459929 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-node-log" (OuterVolumeSpecName: "node-log") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459948 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-log-socket" (OuterVolumeSpecName: "log-socket") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459971 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459981 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.459906 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460287 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-node-log\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460547 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-etc-openvswitch\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460626 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1b1e594d-c826-43d2-a8b3-83c01f9570a6-ovnkube-config\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460633 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460663 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-kubelet\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460695 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1b1e594d-c826-43d2-a8b3-83c01f9570a6-ovn-node-metrics-cert\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460753 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg48s\" (UniqueName: \"kubernetes.io/projected/1b1e594d-c826-43d2-a8b3-83c01f9570a6-kube-api-access-wg48s\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460800 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-slash\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460913 4948 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460944 4948 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460963 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.460983 4948 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461004 4948 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461023 4948 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-node-log\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461039 4948 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-log-socket\") on node 
\"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461052 4948 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461094 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461123 4948 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461143 4948 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461162 4948 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-slash\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461178 4948 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461195 4948 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461215 4948 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bad3107e-91a9-463d-b981-fb102616bdbe-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461231 4948 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.461266 4948 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.466555 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bad3107e-91a9-463d-b981-fb102616bdbe-kube-api-access-hrcwm" (OuterVolumeSpecName: "kube-api-access-hrcwm") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "kube-api-access-hrcwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.467126 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bad3107e-91a9-463d-b981-fb102616bdbe-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). 
InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.475422 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "bad3107e-91a9-463d-b981-fb102616bdbe" (UID: "bad3107e-91a9-463d-b981-fb102616bdbe"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562540 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-run-ovn\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562582 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-run-netns\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562602 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-run-ovn-kubernetes\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562618 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562636 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1b1e594d-c826-43d2-a8b3-83c01f9570a6-env-overrides\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562651 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-cni-netd\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562668 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-var-lib-openvswitch\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562689 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1b1e594d-c826-43d2-a8b3-83c01f9570a6-ovnkube-script-lib\") pod 
\"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562706 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-log-socket\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562721 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-systemd-units\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562736 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-run-systemd\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562734 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562761 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-node-log\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562777 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-run-netns\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562807 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-node-log\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562823 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-systemd-units\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562828 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-log-socket\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 
04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562855 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-var-lib-openvswitch\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562726 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-run-ovn-kubernetes\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562837 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-run-systemd\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562882 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-etc-openvswitch\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562885 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-etc-openvswitch\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562936 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1b1e594d-c826-43d2-a8b3-83c01f9570a6-ovnkube-config\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562950 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-cni-netd\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562995 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-kubelet\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.562964 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-kubelet\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563079 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1b1e594d-c826-43d2-a8b3-83c01f9570a6-ovn-node-metrics-cert\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563153 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-slash\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563196 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg48s\" (UniqueName: \"kubernetes.io/projected/1b1e594d-c826-43d2-a8b3-83c01f9570a6-kube-api-access-wg48s\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563234 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-slash\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563243 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-run-openvswitch\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563283 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-run-openvswitch\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563296 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-cni-bin\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563333 4948 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bad3107e-91a9-463d-b981-fb102616bdbe-run-systemd\") on node \"crc\" DevicePath \"\""
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563344 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hrcwm\" (UniqueName: \"kubernetes.io/projected/bad3107e-91a9-463d-b981-fb102616bdbe-kube-api-access-hrcwm\") on node \"crc\" DevicePath \"\""
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563355 4948 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bad3107e-91a9-463d-b981-fb102616bdbe-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563376 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-host-cni-bin\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563678 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1b1e594d-c826-43d2-a8b3-83c01f9570a6-env-overrides\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563742 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1b1e594d-c826-43d2-a8b3-83c01f9570a6-run-ovn\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563821 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1b1e594d-c826-43d2-a8b3-83c01f9570a6-ovnkube-script-lib\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.563848 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1b1e594d-c826-43d2-a8b3-83c01f9570a6-ovnkube-config\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.567076 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1b1e594d-c826-43d2-a8b3-83c01f9570a6-ovn-node-metrics-cert\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.582145 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg48s\" (UniqueName: \"kubernetes.io/projected/1b1e594d-c826-43d2-a8b3-83c01f9570a6-kube-api-access-wg48s\") pod \"ovnkube-node-pp6gd\" (UID: \"1b1e594d-c826-43d2-a8b3-83c01f9570a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:17 crc kubenswrapper[4948]: I1122 04:58:17.773200 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.128764 4948 generic.go:334] "Generic (PLEG): container finished" podID="1b1e594d-c826-43d2-a8b3-83c01f9570a6" containerID="c4aabde523ad06836eb84ed001843db3b2a2b073a13fd539643a0166af85f068" exitCode=0
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.128860 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerDied","Data":"c4aabde523ad06836eb84ed001843db3b2a2b073a13fd539643a0166af85f068"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.128894 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerStarted","Data":"9a61942605ca1b19bc3ad931ded931af8f3c3aad5b71e18d102e7cb1ebfac38f"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.136001 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/2.log"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.136859 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/1.log"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.136906 4948 generic.go:334] "Generic (PLEG): container finished" podID="7a2e6333-2885-4eaf-a4b3-6613127e6375" containerID="65292ad5c1b26c893ba431368e9d2a9c6cf6a06c2be1de9ff0dad5f538be179f" exitCode=2
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.136966 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mw95l" event={"ID":"7a2e6333-2885-4eaf-a4b3-6613127e6375","Type":"ContainerDied","Data":"65292ad5c1b26c893ba431368e9d2a9c6cf6a06c2be1de9ff0dad5f538be179f"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.136996 4948 scope.go:117] "RemoveContainer" containerID="c08f2661298f84a686c7e96715dbd6ff2b0c66ee240f3cfa1cf6a0c71769a33f"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.137835 4948 scope.go:117] "RemoveContainer" containerID="65292ad5c1b26c893ba431368e9d2a9c6cf6a06c2be1de9ff0dad5f538be179f"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.138219 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-mw95l_openshift-multus(7a2e6333-2885-4eaf-a4b3-6613127e6375)\"" pod="openshift-multus/multus-mw95l" podUID="7a2e6333-2885-4eaf-a4b3-6613127e6375"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.142359 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovnkube-controller/3.log"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.148444 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovn-acl-logging/0.log"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.149524 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bspvz_bad3107e-91a9-463d-b981-fb102616bdbe/ovn-controller/0.log"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.150779 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.150790 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.150881 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144" exitCode=0
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.152380 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c" exitCode=0
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.152575 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c" exitCode=0
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.152717 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4" exitCode=0
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.152857 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0" exitCode=0
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.152990 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9" exitCode=0
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.153126 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841" exitCode=143
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.153245 4948 generic.go:334] "Generic (PLEG): container finished" podID="bad3107e-91a9-463d-b981-fb102616bdbe" containerID="540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db" exitCode=143
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.152602 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.153511 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.153661 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.153800 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.153920 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.154037 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.154182 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.154333 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.154535 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.154669 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.154772 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.154879 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.154985 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.155086 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.155193 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.155311 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.155434 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.155590 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.155709 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.155810 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.155919 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.156026 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.156127 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.156234 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.156339 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.156524 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.156669 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.156793 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.156905 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.157057 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.157172 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.157274 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.157387 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.157512 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.157636 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.157801 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.157964 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.159077 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bspvz" event={"ID":"bad3107e-91a9-463d-b981-fb102616bdbe","Type":"ContainerDied","Data":"4f8eaf05b8c804884443cc00633061cb268ec71a5495302043a55b4d4987f56f"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.159266 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.159406 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.159562 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.159679 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.159789 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.159889 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.159999 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.160101 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.160198 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.160307 4948 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"}
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.222372 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-bspvz"]
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.230176 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-bspvz"]
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.248596 4948 scope.go:117] "RemoveContainer" containerID="450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.265291 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.293405 4948 scope.go:117] "RemoveContainer" containerID="58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.316501 4948 scope.go:117] "RemoveContainer" containerID="238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.333084 4948 scope.go:117] "RemoveContainer" containerID="222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.347616 4948 scope.go:117] "RemoveContainer" containerID="74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.368679 4948 scope.go:117] "RemoveContainer" containerID="9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.407951 4948 scope.go:117] "RemoveContainer" containerID="2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.439385 4948 scope.go:117] "RemoveContainer" containerID="540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.458983 4948 scope.go:117] "RemoveContainer" containerID="65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.474014 4948 scope.go:117] "RemoveContainer" containerID="450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.474375 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144\": container with ID starting with 450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144 not found: ID does not exist" containerID="450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.474408 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"} err="failed to get container status \"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144\": rpc error: code = NotFound desc = could not find container \"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144\": container with ID starting with 450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.474437 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.474709 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\": container with ID starting with 3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500 not found: ID does not exist" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.474751 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"} err="failed to get container status \"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\": rpc error: code = NotFound desc = could not find container \"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\": container with ID starting with 3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.474777 4948 scope.go:117] "RemoveContainer" containerID="58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.475247 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\": container with ID starting with 58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c not found: ID does not exist" containerID="58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.475263 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"} err="failed to get container status \"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\": rpc error: code = NotFound desc = could not find container \"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\": container with ID starting with 58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.475277 4948 scope.go:117] "RemoveContainer" containerID="238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.475938 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\": container with ID starting with 238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c not found: ID does not exist" containerID="238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.475963 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"} err="failed to get container status \"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\": rpc error: code = NotFound desc = could not find container \"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\": container with ID starting with 238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.475979 4948 scope.go:117] "RemoveContainer" containerID="222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.476232 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\": container with ID starting with 222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4 not found: ID does not exist" containerID="222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.476270 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"} err="failed to get container status \"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\": rpc error: code = NotFound desc = could not find container \"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\": container with ID starting with 222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.476286 4948 scope.go:117] "RemoveContainer" containerID="74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.477658 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\": container with ID starting with 74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0 not found: ID does not exist" containerID="74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.477689 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"} err="failed to get container status \"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\": rpc error: code = NotFound desc = could not find container \"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\": container with ID starting with 74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.477720 4948 scope.go:117] "RemoveContainer" containerID="9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.480027 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\": container with ID starting with 9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9 not found: ID does not exist" containerID="9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.480051 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"} err="failed to get container status \"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\": rpc error: code = NotFound desc = could not find container \"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\": container with ID starting with 9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.480068 4948 scope.go:117] "RemoveContainer" containerID="2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.480278 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\": container with ID starting with 2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841 not found: ID does not exist" containerID="2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.480296 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"} err="failed to get container status \"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\": rpc error: code = NotFound desc = could not find container \"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\": container with ID starting with 2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.480307 4948 scope.go:117] "RemoveContainer" containerID="540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.480505 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\": container with ID starting with 540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db not found: ID does not exist" containerID="540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.480524 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"} err="failed to get container status \"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\": rpc error: code = NotFound desc = could not find container \"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\": container with ID starting with 540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.480535 4948 scope.go:117] "RemoveContainer" containerID="65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"
Nov 22 04:58:18 crc kubenswrapper[4948]: E1122 04:58:18.480728 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\": container with ID starting with 65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6 not found: ID does not exist" containerID="65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.480747 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"} err="failed to get container status \"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\": rpc error: code = NotFound desc = could not find container \"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\": container with ID starting with 65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.480760 4948 scope.go:117] "RemoveContainer" containerID="450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.482614 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"} err="failed to get container status \"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144\": rpc error: code = NotFound desc = could not find container \"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144\": container with ID starting with 450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.482633 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.482826 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"} err="failed to get container status \"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\": rpc error: code = NotFound desc = could not find container \"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\": container with ID starting with 3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.482856 4948 scope.go:117] "RemoveContainer" containerID="58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.483102 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"} err="failed to get container status \"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\": rpc error: code = NotFound desc = could not find container \"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\": container with ID starting with 58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.483129 4948 scope.go:117] "RemoveContainer" containerID="238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.483527 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"} err="failed to get container status \"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\": rpc error: code = NotFound desc = could not find container \"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\": container with ID starting with 238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.483579 4948 scope.go:117] "RemoveContainer" containerID="222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.483860 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"} err="failed to get container status \"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\": rpc error: code = NotFound desc = could not find container \"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\": container with ID starting with 222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.483885 4948 scope.go:117] "RemoveContainer" containerID="74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.484097 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"} err="failed to get container status \"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\": rpc error: code = NotFound desc = could not find container \"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\": container with ID starting with 74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.484126 4948 scope.go:117] "RemoveContainer" containerID="9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.484337 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"} err="failed to get container status \"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\": rpc error: code = NotFound desc = could not find container \"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\": container with ID starting with 9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.484363 4948 scope.go:117] "RemoveContainer" containerID="2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.484837 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"} err="failed to get container status \"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\": rpc error: code = NotFound desc = could not find container \"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\": container with ID starting with 2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.484862 4948 scope.go:117] "RemoveContainer" containerID="540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.485067 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"} err="failed to get container status \"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\": rpc error: code = NotFound desc = could not find container \"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\": container with ID starting with 540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.485087 4948 scope.go:117] "RemoveContainer" containerID="65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.485278 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"} err="failed to get container status \"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\": rpc error: code = NotFound desc = could not find container \"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\": container with ID starting with 65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.485301 4948 scope.go:117] "RemoveContainer" containerID="450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.485579 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"} err="failed to get container status \"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144\": rpc error: code = NotFound desc = could not find container \"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144\": container with ID starting with 450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.485602 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.485799 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"} err="failed to get container status \"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\": rpc error: code = NotFound desc = could not find container \"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\": container with ID starting with 3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.485823 4948 scope.go:117] "RemoveContainer" containerID="58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.486059 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"} err="failed to get container status \"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\": rpc error: code = NotFound desc = could not find container \"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\": container with ID starting with 58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.486087 4948 scope.go:117] "RemoveContainer" containerID="238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.486315 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"} err="failed to get container status \"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\": rpc error: code = NotFound desc = could not find container \"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\": container with ID starting with 238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.486340 4948 scope.go:117] "RemoveContainer" containerID="222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.486567 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"} err="failed to get container status \"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\": rpc error: code = NotFound desc = could not find container \"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\": container with ID starting with 222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.486595 4948 scope.go:117] "RemoveContainer" containerID="74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.486802 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"} err="failed to get container status \"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\": rpc error: code = NotFound desc = could not find container \"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\": container with ID starting with 74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.486824 4948 scope.go:117] "RemoveContainer" containerID="9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487013 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"} err="failed to get container status \"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\": rpc error: code = NotFound desc = could not find container \"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\": container with ID starting with 9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487037 4948 scope.go:117] "RemoveContainer" containerID="2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487199 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"} err="failed to get container status \"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\": rpc error: code = NotFound desc = could not find container \"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\": container with ID starting with 2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487222 4948 scope.go:117] "RemoveContainer" containerID="540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487413 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"} err="failed to get container status \"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\": rpc error: code = NotFound desc = could not find container \"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\": container with ID starting with 540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487440 4948 scope.go:117] "RemoveContainer" containerID="65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487682 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"} err="failed to get container status \"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\": rpc error: code = NotFound desc = could not find container \"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\": container with ID starting with 65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487705 4948 scope.go:117] "RemoveContainer" containerID="450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487910 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144"} err="failed to get container status \"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144\": rpc error: code = NotFound desc = could not find container \"450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144\": container with ID starting with 450c75a7aac3ce1946e7cd1a38b03b75bb9e05128a9ffefd2d1982e4361b4144 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.487932 4948 scope.go:117] "RemoveContainer" containerID="3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.488148 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500"} err="failed to get container status \"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\": rpc error: code = NotFound desc = could not find container \"3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500\": container with ID starting with 3165aa0c89381ab1763020c8dd00f1535db64eaf777f632946924d64d1dd1500 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.488179 4948 scope.go:117] "RemoveContainer" containerID="58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.488397 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c"} err="failed to get container status \"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\": rpc error: code = NotFound desc = could not find container \"58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c\": container with ID starting with 58159f61e2566df198109eafab07251cb94fa440673b76031872dbe909bb955c not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.488419 4948 scope.go:117] "RemoveContainer" containerID="238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.488638 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c"} err="failed to get container status \"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\": rpc error: code = NotFound desc = could not find container \"238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c\": container with ID starting with 238cd62d9e160bf414fdd8ad67be1db8f37588940a025376561ed141fd8f412c not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.488663 4948 scope.go:117] "RemoveContainer" containerID="222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.488851 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4"} err="failed to get container status \"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\": rpc error: code = NotFound desc = could not find container \"222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4\": container with ID starting with 222cf9996d0fb42fdae9f7d1ec28c980cea7c983f346d59b7d0f13efd506fdc4 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.488877 4948 scope.go:117] "RemoveContainer" containerID="74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.489094 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0"} err="failed to get container status \"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\": rpc error: code = NotFound desc = could not find container \"74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0\": container with ID starting with 74c920f8791b00954e001697abb357246a9f313c8ea4df1481728f71faa3a9a0 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.489118 4948 scope.go:117] "RemoveContainer" containerID="9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.489352 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9"} err="failed to get container status \"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\": rpc error: code = NotFound desc = could not find container \"9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9\": container with ID starting with 9b96e01a336d75188e8834e5e784dcab2636b07edaae7afd0962921ae72b7fe9 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.489373 4948 scope.go:117] "RemoveContainer" containerID="2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.489608 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841"} err="failed to get container status \"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\": rpc error: code = NotFound desc = could not find container \"2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841\": container with ID starting with 2fb603b2ffe0506484fdd150d30b7b02c50ea80d070e446d6f65176044038841 not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.489631 4948 scope.go:117] "RemoveContainer" containerID="540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.489798 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db"} err="failed to get container status \"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\": rpc error: code = NotFound desc = could not find container \"540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db\": container with ID starting with 540b2a161320535eabb1c1f3118bd6e3e6eee4f17d3c1d22aad9a86322c279db not found: ID does not exist"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.489819 4948 scope.go:117] "RemoveContainer" containerID="65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"
Nov 22 04:58:18 crc kubenswrapper[4948]: I1122 04:58:18.490134 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6"} err="failed to get container status \"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\": rpc error: code = NotFound desc = could not find container \"65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6\": container with ID starting with 65a6c9ba97ac1d687a82dbb9954c4a4ae5cba88b914de6a0610ba3c1d8b637a6 not found: ID does not exist"
Nov 22 04:58:19 crc kubenswrapper[4948]: I1122 04:58:19.169700 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerStarted","Data":"5bf2ce2cc065f8d1cf287f67318abfda57610f571f7aa8fb70d124233cc24395"}
Nov 22 04:58:19 crc kubenswrapper[4948]: I1122 04:58:19.170116 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerStarted","Data":"6d99408039737558208dfcb1a69f7235266e5a25b0d8b28baea98ff3ec59ffa8"}
Nov 22 04:58:19 crc kubenswrapper[4948]: I1122 04:58:19.170141 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerStarted","Data":"454a843304577cf3ada20b407f84625a6a2aa12dd96f62c03c66e92202023f57"}
Nov 22 04:58:19 crc kubenswrapper[4948]: I1122 04:58:19.170162 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerStarted","Data":"43ca658ddd9d6e90437f39e65ada6c09ea62927e0a8f2653d7b69af3627e03d2"}
Nov 22 04:58:19 crc kubenswrapper[4948]: I1122 04:58:19.170181 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerStarted","Data":"cdcc9ffb211dbfe0fa9fdcef12f43457c7899d20766ea90dca1fcfe5acca3eb7"}
Nov 22 04:58:19 crc kubenswrapper[4948]: I1122 04:58:19.170199 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerStarted","Data":"cb21aab15fa1591cb5c0acf4e6462671c9cb3bb556521d20f83c286cd9613961"}
Nov 22 04:58:19 crc kubenswrapper[4948]: I1122 04:58:19.172072 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/2.log"
Nov 22 04:58:19 crc kubenswrapper[4948]: I1122 04:58:19.765651 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bad3107e-91a9-463d-b981-fb102616bdbe" path="/var/lib/kubelet/pods/bad3107e-91a9-463d-b981-fb102616bdbe/volumes"
Nov 22 04:58:21 crc kubenswrapper[4948]: I1122 04:58:21.187212 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerStarted","Data":"dba69f513539080e13b4f274604446a4b4f34f8fceaa82097f9fc1d5d479050f"}
Nov 22 04:58:24 crc kubenswrapper[4948]: I1122 04:58:24.207949 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" event={"ID":"1b1e594d-c826-43d2-a8b3-83c01f9570a6","Type":"ContainerStarted","Data":"25470010746c3fcbb45c26c3661538d166d5df0c45acc9568a227bf1a82180eb"}
Nov 22 04:58:24 crc kubenswrapper[4948]: I1122 04:58:24.208872 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:24 crc kubenswrapper[4948]: I1122 04:58:24.208897 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:24 crc kubenswrapper[4948]: I1122 04:58:24.208940 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:24 crc kubenswrapper[4948]: I1122 04:58:24.243631 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:24 crc kubenswrapper[4948]: I1122 04:58:24.247073 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd"
Nov 22 04:58:24 crc kubenswrapper[4948]: I1122 04:58:24.256416 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" podStartSLOduration=7.256384136 podStartE2EDuration="7.256384136s" podCreationTimestamp="2025-11-22 04:58:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:58:24.249599877 +0000 UTC m=+706.935610433" watchObservedRunningTime="2025-11-22 04:58:24.256384136 +0000 UTC m=+706.942394692"
Nov 22 04:58:29 crc kubenswrapper[4948]: I1122 04:58:29.763090 4948 scope.go:117] "RemoveContainer" containerID="65292ad5c1b26c893ba431368e9d2a9c6cf6a06c2be1de9ff0dad5f538be179f"
Nov 22 04:58:29 crc kubenswrapper[4948]: E1122 04:58:29.764392 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-mw95l_openshift-multus(7a2e6333-2885-4eaf-a4b3-6613127e6375)\"" pod="openshift-multus/multus-mw95l" podUID="7a2e6333-2885-4eaf-a4b3-6613127e6375"
Nov 22 04:58:41 crc kubenswrapper[4948]: I1122 04:58:41.758424 4948 scope.go:117] "RemoveContainer" containerID="65292ad5c1b26c893ba431368e9d2a9c6cf6a06c2be1de9ff0dad5f538be179f"
Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.333528 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mw95l_7a2e6333-2885-4eaf-a4b3-6613127e6375/kube-multus/2.log"
Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.333639 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mw95l" event={"ID":"7a2e6333-2885-4eaf-a4b3-6613127e6375","Type":"ContainerStarted","Data":"e73d25cd50ed2bc61ab6a022e2cc812cee13c1ee2d76511492f135c3501010ea"}
Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.599997 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk"]
Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.601554 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.603457 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.607725 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk"] Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.690851 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.690937 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.691080 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrn9q\" (UniqueName: \"kubernetes.io/projected/280cc6a0-df02-4d30-83c5-2c927594480b-kube-api-access-rrn9q\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.792792 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrn9q\" (UniqueName: \"kubernetes.io/projected/280cc6a0-df02-4d30-83c5-2c927594480b-kube-api-access-rrn9q\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.794100 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.794151 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.795270 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.796548 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.833896 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrn9q\" (UniqueName: \"kubernetes.io/projected/280cc6a0-df02-4d30-83c5-2c927594480b-kube-api-access-rrn9q\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: I1122 04:58:42.914892 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: E1122 04:58:42.957100 4948 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_openshift-marketplace_280cc6a0-df02-4d30-83c5-2c927594480b_0(eac5c896efab656d41abf93ae7c2ce33015131de8109c82e35ea478e468c418c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 04:58:42 crc kubenswrapper[4948]: E1122 04:58:42.957267 4948 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_openshift-marketplace_280cc6a0-df02-4d30-83c5-2c927594480b_0(eac5c896efab656d41abf93ae7c2ce33015131de8109c82e35ea478e468c418c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: E1122 04:58:42.957306 4948 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_openshift-marketplace_280cc6a0-df02-4d30-83c5-2c927594480b_0(eac5c896efab656d41abf93ae7c2ce33015131de8109c82e35ea478e468c418c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:42 crc kubenswrapper[4948]: E1122 04:58:42.957410 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_openshift-marketplace(280cc6a0-df02-4d30-83c5-2c927594480b)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_openshift-marketplace(280cc6a0-df02-4d30-83c5-2c927594480b)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_openshift-marketplace_280cc6a0-df02-4d30-83c5-2c927594480b_0(eac5c896efab656d41abf93ae7c2ce33015131de8109c82e35ea478e468c418c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" podUID="280cc6a0-df02-4d30-83c5-2c927594480b" Nov 22 04:58:43 crc kubenswrapper[4948]: I1122 04:58:43.341923 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:43 crc kubenswrapper[4948]: I1122 04:58:43.342652 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:43 crc kubenswrapper[4948]: I1122 04:58:43.604901 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk"] Nov 22 04:58:43 crc kubenswrapper[4948]: W1122 04:58:43.613965 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod280cc6a0_df02_4d30_83c5_2c927594480b.slice/crio-902d861545c08ca483383acaba46ff8a88e9d3aa44d10e4833e0b4b660ba0783 WatchSource:0}: Error finding container 902d861545c08ca483383acaba46ff8a88e9d3aa44d10e4833e0b4b660ba0783: Status 404 returned error can't find the container with id 902d861545c08ca483383acaba46ff8a88e9d3aa44d10e4833e0b4b660ba0783 Nov 22 04:58:44 crc kubenswrapper[4948]: I1122 04:58:44.361337 4948 generic.go:334] "Generic (PLEG): container finished" podID="280cc6a0-df02-4d30-83c5-2c927594480b" containerID="c8eae30b38723881f93688e5d2ade133b921f59c6f98cd47d2e7302dcd0db344" exitCode=0 Nov 22 04:58:44 crc kubenswrapper[4948]: I1122 04:58:44.361640 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" event={"ID":"280cc6a0-df02-4d30-83c5-2c927594480b","Type":"ContainerDied","Data":"c8eae30b38723881f93688e5d2ade133b921f59c6f98cd47d2e7302dcd0db344"} Nov 22 04:58:44 crc kubenswrapper[4948]: I1122 04:58:44.362632 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" event={"ID":"280cc6a0-df02-4d30-83c5-2c927594480b","Type":"ContainerStarted","Data":"902d861545c08ca483383acaba46ff8a88e9d3aa44d10e4833e0b4b660ba0783"} Nov 22 04:58:44 crc kubenswrapper[4948]: I1122 04:58:44.364879 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 04:58:47 crc kubenswrapper[4948]: I1122 04:58:47.387348 4948 generic.go:334] "Generic (PLEG): container finished" 
podID="280cc6a0-df02-4d30-83c5-2c927594480b" containerID="98e8258c92a9847efc03435a10dd4c98df344eb3c04cd441b768dec07d0b6ba4" exitCode=0 Nov 22 04:58:47 crc kubenswrapper[4948]: I1122 04:58:47.387509 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" event={"ID":"280cc6a0-df02-4d30-83c5-2c927594480b","Type":"ContainerDied","Data":"98e8258c92a9847efc03435a10dd4c98df344eb3c04cd441b768dec07d0b6ba4"} Nov 22 04:58:47 crc kubenswrapper[4948]: I1122 04:58:47.803963 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pp6gd" Nov 22 04:58:48 crc kubenswrapper[4948]: I1122 04:58:48.395940 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" event={"ID":"280cc6a0-df02-4d30-83c5-2c927594480b","Type":"ContainerStarted","Data":"eb3bb179d3e6ee18c6587d2f8445874fd4499889328906afecd70aeb032d6a0a"} Nov 22 04:58:49 crc kubenswrapper[4948]: I1122 04:58:49.404080 4948 generic.go:334] "Generic (PLEG): container finished" podID="280cc6a0-df02-4d30-83c5-2c927594480b" containerID="eb3bb179d3e6ee18c6587d2f8445874fd4499889328906afecd70aeb032d6a0a" exitCode=0 Nov 22 04:58:49 crc kubenswrapper[4948]: I1122 04:58:49.404145 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" event={"ID":"280cc6a0-df02-4d30-83c5-2c927594480b","Type":"ContainerDied","Data":"eb3bb179d3e6ee18c6587d2f8445874fd4499889328906afecd70aeb032d6a0a"} Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.694077 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.799618 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrn9q\" (UniqueName: \"kubernetes.io/projected/280cc6a0-df02-4d30-83c5-2c927594480b-kube-api-access-rrn9q\") pod \"280cc6a0-df02-4d30-83c5-2c927594480b\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.799756 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-bundle\") pod \"280cc6a0-df02-4d30-83c5-2c927594480b\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.800798 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-util\") pod \"280cc6a0-df02-4d30-83c5-2c927594480b\" (UID: \"280cc6a0-df02-4d30-83c5-2c927594480b\") " Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.801984 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-bundle" (OuterVolumeSpecName: "bundle") pod "280cc6a0-df02-4d30-83c5-2c927594480b" (UID: "280cc6a0-df02-4d30-83c5-2c927594480b"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.808415 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/280cc6a0-df02-4d30-83c5-2c927594480b-kube-api-access-rrn9q" (OuterVolumeSpecName: "kube-api-access-rrn9q") pod "280cc6a0-df02-4d30-83c5-2c927594480b" (UID: "280cc6a0-df02-4d30-83c5-2c927594480b"). InnerVolumeSpecName "kube-api-access-rrn9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.824229 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-util" (OuterVolumeSpecName: "util") pod "280cc6a0-df02-4d30-83c5-2c927594480b" (UID: "280cc6a0-df02-4d30-83c5-2c927594480b"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.903557 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrn9q\" (UniqueName: \"kubernetes.io/projected/280cc6a0-df02-4d30-83c5-2c927594480b-kube-api-access-rrn9q\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.903612 4948 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:50 crc kubenswrapper[4948]: I1122 04:58:50.903634 4948 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/280cc6a0-df02-4d30-83c5-2c927594480b-util\") on node \"crc\" DevicePath \"\"" Nov 22 04:58:51 crc kubenswrapper[4948]: I1122 04:58:51.424350 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" event={"ID":"280cc6a0-df02-4d30-83c5-2c927594480b","Type":"ContainerDied","Data":"902d861545c08ca483383acaba46ff8a88e9d3aa44d10e4833e0b4b660ba0783"} Nov 22 04:58:51 crc kubenswrapper[4948]: I1122 04:58:51.424850 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="902d861545c08ca483383acaba46ff8a88e9d3aa44d10e4833e0b4b660ba0783" Nov 22 04:58:51 crc kubenswrapper[4948]: I1122 04:58:51.424451 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.005481 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs"] Nov 22 04:59:00 crc kubenswrapper[4948]: E1122 04:59:00.006226 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="280cc6a0-df02-4d30-83c5-2c927594480b" containerName="util" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.006241 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="280cc6a0-df02-4d30-83c5-2c927594480b" containerName="util" Nov 22 04:59:00 crc kubenswrapper[4948]: E1122 04:59:00.006256 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="280cc6a0-df02-4d30-83c5-2c927594480b" containerName="pull" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.006264 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="280cc6a0-df02-4d30-83c5-2c927594480b" containerName="pull" Nov 22 04:59:00 crc kubenswrapper[4948]: E1122 04:59:00.006278 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="280cc6a0-df02-4d30-83c5-2c927594480b" containerName="extract" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.006287 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="280cc6a0-df02-4d30-83c5-2c927594480b" containerName="extract" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.006401 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="280cc6a0-df02-4d30-83c5-2c927594480b" containerName="extract" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.006888 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.009290 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.009645 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.009736 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.010919 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-ngtkj" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.010968 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.027388 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs"] Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.118156 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cc6bfaa2-11b1-48d0-92c5-e025633693b8-apiservice-cert\") pod \"metallb-operator-controller-manager-c4c4f5766-cqwrs\" (UID: \"cc6bfaa2-11b1-48d0-92c5-e025633693b8\") " pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.118236 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cc6bfaa2-11b1-48d0-92c5-e025633693b8-webhook-cert\") pod \"metallb-operator-controller-manager-c4c4f5766-cqwrs\" (UID: \"cc6bfaa2-11b1-48d0-92c5-e025633693b8\") " pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.118548 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qxxq\" (UniqueName: \"kubernetes.io/projected/cc6bfaa2-11b1-48d0-92c5-e025633693b8-kube-api-access-6qxxq\") pod \"metallb-operator-controller-manager-c4c4f5766-cqwrs\" (UID: \"cc6bfaa2-11b1-48d0-92c5-e025633693b8\") " pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.220074 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qxxq\" (UniqueName: \"kubernetes.io/projected/cc6bfaa2-11b1-48d0-92c5-e025633693b8-kube-api-access-6qxxq\") pod \"metallb-operator-controller-manager-c4c4f5766-cqwrs\" (UID: \"cc6bfaa2-11b1-48d0-92c5-e025633693b8\") " pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.220137 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cc6bfaa2-11b1-48d0-92c5-e025633693b8-apiservice-cert\") pod \"metallb-operator-controller-manager-c4c4f5766-cqwrs\" (UID: \"cc6bfaa2-11b1-48d0-92c5-e025633693b8\") " pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.220173 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cc6bfaa2-11b1-48d0-92c5-e025633693b8-webhook-cert\") pod \"metallb-operator-controller-manager-c4c4f5766-cqwrs\" (UID: \"cc6bfaa2-11b1-48d0-92c5-e025633693b8\") " pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.228141 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cc6bfaa2-11b1-48d0-92c5-e025633693b8-webhook-cert\") pod \"metallb-operator-controller-manager-c4c4f5766-cqwrs\" (UID: \"cc6bfaa2-11b1-48d0-92c5-e025633693b8\") " pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.237579 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cc6bfaa2-11b1-48d0-92c5-e025633693b8-apiservice-cert\") pod \"metallb-operator-controller-manager-c4c4f5766-cqwrs\" (UID: \"cc6bfaa2-11b1-48d0-92c5-e025633693b8\") " pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.246012 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qxxq\" (UniqueName: \"kubernetes.io/projected/cc6bfaa2-11b1-48d0-92c5-e025633693b8-kube-api-access-6qxxq\") pod \"metallb-operator-controller-manager-c4c4f5766-cqwrs\" (UID: \"cc6bfaa2-11b1-48d0-92c5-e025633693b8\") " pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.324579 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.377099 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r"] Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.377694 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.380239 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.380305 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.396840 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-ggj6w" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.412458 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r"] Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.435681 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzjwl\" (UniqueName: \"kubernetes.io/projected/ff397fa2-bc96-49ee-a508-2c0da701972a-kube-api-access-fzjwl\") pod \"metallb-operator-webhook-server-789cf9d5c8-mhm7r\" (UID: \"ff397fa2-bc96-49ee-a508-2c0da701972a\") " pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.435759 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ff397fa2-bc96-49ee-a508-2c0da701972a-webhook-cert\") pod \"metallb-operator-webhook-server-789cf9d5c8-mhm7r\" (UID: \"ff397fa2-bc96-49ee-a508-2c0da701972a\") " pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.435786 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ff397fa2-bc96-49ee-a508-2c0da701972a-apiservice-cert\") pod \"metallb-operator-webhook-server-789cf9d5c8-mhm7r\" (UID: \"ff397fa2-bc96-49ee-a508-2c0da701972a\") " pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.537277 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzjwl\" (UniqueName: \"kubernetes.io/projected/ff397fa2-bc96-49ee-a508-2c0da701972a-kube-api-access-fzjwl\") pod \"metallb-operator-webhook-server-789cf9d5c8-mhm7r\" (UID: \"ff397fa2-bc96-49ee-a508-2c0da701972a\") " pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.537344 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ff397fa2-bc96-49ee-a508-2c0da701972a-webhook-cert\") pod \"metallb-operator-webhook-server-789cf9d5c8-mhm7r\" (UID: \"ff397fa2-bc96-49ee-a508-2c0da701972a\") " pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.537370 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ff397fa2-bc96-49ee-a508-2c0da701972a-apiservice-cert\") pod \"metallb-operator-webhook-server-789cf9d5c8-mhm7r\" (UID: \"ff397fa2-bc96-49ee-a508-2c0da701972a\") " pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.551643 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ff397fa2-bc96-49ee-a508-2c0da701972a-webhook-cert\") pod \"metallb-operator-webhook-server-789cf9d5c8-mhm7r\" (UID: \"ff397fa2-bc96-49ee-a508-2c0da701972a\") " pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.553928 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ff397fa2-bc96-49ee-a508-2c0da701972a-apiservice-cert\") pod \"metallb-operator-webhook-server-789cf9d5c8-mhm7r\" (UID: \"ff397fa2-bc96-49ee-a508-2c0da701972a\") " pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.576019 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzjwl\" (UniqueName: \"kubernetes.io/projected/ff397fa2-bc96-49ee-a508-2c0da701972a-kube-api-access-fzjwl\") pod \"metallb-operator-webhook-server-789cf9d5c8-mhm7r\" (UID: \"ff397fa2-bc96-49ee-a508-2c0da701972a\") " pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.598973 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs"] Nov 22 04:59:00 crc kubenswrapper[4948]: W1122 04:59:00.614383 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc6bfaa2_11b1_48d0_92c5_e025633693b8.slice/crio-24093d9fc5cfca21b2a237bc3abee0e3c025c2e013d7184ceffedb30aff1f2ca WatchSource:0}: Error finding container 24093d9fc5cfca21b2a237bc3abee0e3c025c2e013d7184ceffedb30aff1f2ca: Status 404 returned error can't find the container with id 24093d9fc5cfca21b2a237bc3abee0e3c025c2e013d7184ceffedb30aff1f2ca Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.703176 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:00 crc kubenswrapper[4948]: I1122 04:59:00.956767 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r"] Nov 22 04:59:00 crc kubenswrapper[4948]: W1122 04:59:00.966277 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff397fa2_bc96_49ee_a508_2c0da701972a.slice/crio-192b27d602b125a38ecef282ec9d2f9a31cdf52702eca59cc50566912dc570c5 WatchSource:0}: Error finding container 192b27d602b125a38ecef282ec9d2f9a31cdf52702eca59cc50566912dc570c5: Status 404 returned error can't find the container with id 192b27d602b125a38ecef282ec9d2f9a31cdf52702eca59cc50566912dc570c5 Nov 22 04:59:01 crc kubenswrapper[4948]: I1122 04:59:01.491203 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" event={"ID":"ff397fa2-bc96-49ee-a508-2c0da701972a","Type":"ContainerStarted","Data":"192b27d602b125a38ecef282ec9d2f9a31cdf52702eca59cc50566912dc570c5"} Nov 22 04:59:01 crc kubenswrapper[4948]: I1122 04:59:01.492249 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" event={"ID":"cc6bfaa2-11b1-48d0-92c5-e025633693b8","Type":"ContainerStarted","Data":"24093d9fc5cfca21b2a237bc3abee0e3c025c2e013d7184ceffedb30aff1f2ca"} Nov 22 04:59:05 crc kubenswrapper[4948]: I1122 04:59:05.513586 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" event={"ID":"cc6bfaa2-11b1-48d0-92c5-e025633693b8","Type":"ContainerStarted","Data":"c3256b1b233b91a084192146369c1b5ee3c8f70c362e826bcbde166a14ab2536"} Nov 22 04:59:05 crc kubenswrapper[4948]: I1122 04:59:05.514067 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:05 crc kubenswrapper[4948]: I1122 04:59:05.515612 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" event={"ID":"ff397fa2-bc96-49ee-a508-2c0da701972a","Type":"ContainerStarted","Data":"8784cbe6d168df555fd79f37a51a04b79fad441881e7439284ffe72e1e989ba7"} Nov 22 04:59:05 crc kubenswrapper[4948]: I1122 04:59:05.516086 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:05 crc kubenswrapper[4948]: I1122 04:59:05.537767 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" podStartSLOduration=1.938177359 podStartE2EDuration="6.537749062s" podCreationTimestamp="2025-11-22 04:58:59 +0000 UTC" firstStartedPulling="2025-11-22 04:59:00.618369978 +0000 UTC m=+743.304380494" lastFinishedPulling="2025-11-22 04:59:05.217941681 +0000 UTC m=+747.903952197" observedRunningTime="2025-11-22 04:59:05.532141803 +0000 UTC m=+748.218152319" watchObservedRunningTime="2025-11-22 04:59:05.537749062 +0000 UTC m=+748.223759578" Nov 22 04:59:05 crc kubenswrapper[4948]: I1122 04:59:05.553823 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" podStartSLOduration=1.285347764 podStartE2EDuration="5.553804876s" podCreationTimestamp="2025-11-22 
04:59:00 +0000 UTC" firstStartedPulling="2025-11-22 04:59:00.968696112 +0000 UTC m=+743.654706638" lastFinishedPulling="2025-11-22 04:59:05.237153234 +0000 UTC m=+747.923163750" observedRunningTime="2025-11-22 04:59:05.552010425 +0000 UTC m=+748.238020971" watchObservedRunningTime="2025-11-22 04:59:05.553804876 +0000 UTC m=+748.239815392" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.135582 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-fznqf"] Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.136198 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" podUID="9a821dfa-73f0-4d83-b480-f566a1ce12fc" containerName="controller-manager" containerID="cri-o://093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf" gracePeriod=30 Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.217933 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8"] Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.218241 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" podUID="45a847ab-7976-4dbe-9ccc-5c89490b7c52" containerName="route-controller-manager" containerID="cri-o://d84eaec122ca7d2665a3189eab86133e059605cce2370cc7ac31994fac9d8c3e" gracePeriod=30 Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.488855 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.531732 4948 generic.go:334] "Generic (PLEG): container finished" podID="45a847ab-7976-4dbe-9ccc-5c89490b7c52" containerID="d84eaec122ca7d2665a3189eab86133e059605cce2370cc7ac31994fac9d8c3e" exitCode=0 Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.531806 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" event={"ID":"45a847ab-7976-4dbe-9ccc-5c89490b7c52","Type":"ContainerDied","Data":"d84eaec122ca7d2665a3189eab86133e059605cce2370cc7ac31994fac9d8c3e"} Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.534007 4948 generic.go:334] "Generic (PLEG): container finished" podID="9a821dfa-73f0-4d83-b480-f566a1ce12fc" containerID="093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf" exitCode=0 Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.534049 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" event={"ID":"9a821dfa-73f0-4d83-b480-f566a1ce12fc","Type":"ContainerDied","Data":"093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf"} Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.534085 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" event={"ID":"9a821dfa-73f0-4d83-b480-f566a1ce12fc","Type":"ContainerDied","Data":"284809c4d061cef6f1f66ab634a3275d9b8dba31b20f1124979d8df08b53db50"} Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.534104 4948 scope.go:117] "RemoveContainer" containerID="093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.534229 4948 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-fznqf" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.550791 4948 scope.go:117] "RemoveContainer" containerID="093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf" Nov 22 04:59:08 crc kubenswrapper[4948]: E1122 04:59:08.551234 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf\": container with ID starting with 093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf not found: ID does not exist" containerID="093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.551277 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf"} err="failed to get container status \"093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf\": rpc error: code = NotFound desc = could not find container \"093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf\": container with ID starting with 093d9b50b02f45aa448f1ff4862c36f0bcf8947b04f42d273ddd75eed592f8cf not found: ID does not exist" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.563531 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-config\") pod \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.563589 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a821dfa-73f0-4d83-b480-f566a1ce12fc-serving-cert\") pod \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.563642 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-client-ca\") pod \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.563680 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkzqs\" (UniqueName: \"kubernetes.io/projected/9a821dfa-73f0-4d83-b480-f566a1ce12fc-kube-api-access-pkzqs\") pod \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.563711 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-proxy-ca-bundles\") pod \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\" (UID: \"9a821dfa-73f0-4d83-b480-f566a1ce12fc\") " Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.564571 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-config" (OuterVolumeSpecName: "config") pod "9a821dfa-73f0-4d83-b480-f566a1ce12fc" (UID: "9a821dfa-73f0-4d83-b480-f566a1ce12fc"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.565645 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-client-ca" (OuterVolumeSpecName: "client-ca") pod "9a821dfa-73f0-4d83-b480-f566a1ce12fc" (UID: "9a821dfa-73f0-4d83-b480-f566a1ce12fc"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.567705 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "9a821dfa-73f0-4d83-b480-f566a1ce12fc" (UID: "9a821dfa-73f0-4d83-b480-f566a1ce12fc"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.570485 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a821dfa-73f0-4d83-b480-f566a1ce12fc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9a821dfa-73f0-4d83-b480-f566a1ce12fc" (UID: "9a821dfa-73f0-4d83-b480-f566a1ce12fc"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.584868 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a821dfa-73f0-4d83-b480-f566a1ce12fc-kube-api-access-pkzqs" (OuterVolumeSpecName: "kube-api-access-pkzqs") pod "9a821dfa-73f0-4d83-b480-f566a1ce12fc" (UID: "9a821dfa-73f0-4d83-b480-f566a1ce12fc"). InnerVolumeSpecName "kube-api-access-pkzqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.594893 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.664423 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-config\") pod \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.664541 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45a847ab-7976-4dbe-9ccc-5c89490b7c52-serving-cert\") pod \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.664586 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-client-ca\") pod \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.664669 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvdl2\" (UniqueName: \"kubernetes.io/projected/45a847ab-7976-4dbe-9ccc-5c89490b7c52-kube-api-access-qvdl2\") pod \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\" (UID: \"45a847ab-7976-4dbe-9ccc-5c89490b7c52\") " Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.664906 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkzqs\" (UniqueName: \"kubernetes.io/projected/9a821dfa-73f0-4d83-b480-f566a1ce12fc-kube-api-access-pkzqs\") on node \"crc\" DevicePath \"\"" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.664928 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.664940 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.664953 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a821dfa-73f0-4d83-b480-f566a1ce12fc-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.664964 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9a821dfa-73f0-4d83-b480-f566a1ce12fc-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.665329 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-client-ca" (OuterVolumeSpecName: "client-ca") pod "45a847ab-7976-4dbe-9ccc-5c89490b7c52" (UID: "45a847ab-7976-4dbe-9ccc-5c89490b7c52"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.665340 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-config" (OuterVolumeSpecName: "config") pod "45a847ab-7976-4dbe-9ccc-5c89490b7c52" (UID: "45a847ab-7976-4dbe-9ccc-5c89490b7c52"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.667787 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45a847ab-7976-4dbe-9ccc-5c89490b7c52-kube-api-access-qvdl2" (OuterVolumeSpecName: "kube-api-access-qvdl2") pod "45a847ab-7976-4dbe-9ccc-5c89490b7c52" (UID: "45a847ab-7976-4dbe-9ccc-5c89490b7c52"). InnerVolumeSpecName "kube-api-access-qvdl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.672313 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a847ab-7976-4dbe-9ccc-5c89490b7c52-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "45a847ab-7976-4dbe-9ccc-5c89490b7c52" (UID: "45a847ab-7976-4dbe-9ccc-5c89490b7c52"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.766352 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-config\") on node \"crc\" DevicePath \"\"" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.766382 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45a847ab-7976-4dbe-9ccc-5c89490b7c52-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.766391 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/45a847ab-7976-4dbe-9ccc-5c89490b7c52-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.766401 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvdl2\" (UniqueName: \"kubernetes.io/projected/45a847ab-7976-4dbe-9ccc-5c89490b7c52-kube-api-access-qvdl2\") on node \"crc\" DevicePath \"\"" Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.858140 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-fznqf"] Nov 22 04:59:08 crc kubenswrapper[4948]: I1122 04:59:08.863309 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-fznqf"] Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.540871 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" event={"ID":"45a847ab-7976-4dbe-9ccc-5c89490b7c52","Type":"ContainerDied","Data":"7f13ef20180fd70e4c6715dbc9ca40ed03e88537320784e1a41952f080a96303"} Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.540897 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.541261 4948 scope.go:117] "RemoveContainer" containerID="d84eaec122ca7d2665a3189eab86133e059605cce2370cc7ac31994fac9d8c3e" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.568096 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8"] Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.578228 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-l7pw8"] Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.766779 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45a847ab-7976-4dbe-9ccc-5c89490b7c52" path="/var/lib/kubelet/pods/45a847ab-7976-4dbe-9ccc-5c89490b7c52/volumes" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.767545 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a821dfa-73f0-4d83-b480-f566a1ce12fc" path="/var/lib/kubelet/pods/9a821dfa-73f0-4d83-b480-f566a1ce12fc/volumes" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.820420 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-55994f8d78-fnxfz"] Nov 22 04:59:09 crc kubenswrapper[4948]: E1122 04:59:09.820640 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a821dfa-73f0-4d83-b480-f566a1ce12fc" containerName="controller-manager" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.820651 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a821dfa-73f0-4d83-b480-f566a1ce12fc" containerName="controller-manager" Nov 22 04:59:09 crc kubenswrapper[4948]: E1122 04:59:09.820663 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a847ab-7976-4dbe-9ccc-5c89490b7c52" containerName="route-controller-manager" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.820669 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a847ab-7976-4dbe-9ccc-5c89490b7c52" containerName="route-controller-manager" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.820759 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a821dfa-73f0-4d83-b480-f566a1ce12fc" containerName="controller-manager" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.820769 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a847ab-7976-4dbe-9ccc-5c89490b7c52" containerName="route-controller-manager" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.821090 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.824584 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.824839 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.825316 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.825635 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.827425 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.827643 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.841222 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.843681 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55994f8d78-fnxfz"] Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.854169 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst"] Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.854981 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.857653 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.857895 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.858085 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.858225 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.858361 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.858516 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.877440 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst"] Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.879990 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/840b335b-2d57-4b47-a63b-affc070b618c-client-ca\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.880050 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm8fh\" (UniqueName: \"kubernetes.io/projected/0825b3dd-f357-4bda-bee2-95e2761edd79-kube-api-access-zm8fh\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.880708 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8mk2\" (UniqueName: \"kubernetes.io/projected/840b335b-2d57-4b47-a63b-affc070b618c-kube-api-access-t8mk2\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.880735 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/840b335b-2d57-4b47-a63b-affc070b618c-serving-cert\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.881924 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0825b3dd-f357-4bda-bee2-95e2761edd79-config\") pod \"route-controller-manager-5cc69cd97f-69jst\" 
(UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.881980 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0825b3dd-f357-4bda-bee2-95e2761edd79-serving-cert\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.882053 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0825b3dd-f357-4bda-bee2-95e2761edd79-client-ca\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.882077 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/840b335b-2d57-4b47-a63b-affc070b618c-proxy-ca-bundles\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.882319 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/840b335b-2d57-4b47-a63b-affc070b618c-config\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.983590 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0825b3dd-f357-4bda-bee2-95e2761edd79-config\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.983628 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0825b3dd-f357-4bda-bee2-95e2761edd79-serving-cert\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.983656 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0825b3dd-f357-4bda-bee2-95e2761edd79-client-ca\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.983676 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/840b335b-2d57-4b47-a63b-affc070b618c-proxy-ca-bundles\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " 
pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.983707 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/840b335b-2d57-4b47-a63b-affc070b618c-config\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.983753 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/840b335b-2d57-4b47-a63b-affc070b618c-client-ca\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.983778 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm8fh\" (UniqueName: \"kubernetes.io/projected/0825b3dd-f357-4bda-bee2-95e2761edd79-kube-api-access-zm8fh\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.983811 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8mk2\" (UniqueName: \"kubernetes.io/projected/840b335b-2d57-4b47-a63b-affc070b618c-kube-api-access-t8mk2\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.983830 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/840b335b-2d57-4b47-a63b-affc070b618c-serving-cert\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.984719 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0825b3dd-f357-4bda-bee2-95e2761edd79-client-ca\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.985022 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/840b335b-2d57-4b47-a63b-affc070b618c-client-ca\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.985043 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0825b3dd-f357-4bda-bee2-95e2761edd79-config\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.985214 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/840b335b-2d57-4b47-a63b-affc070b618c-config\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.985215 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/840b335b-2d57-4b47-a63b-affc070b618c-proxy-ca-bundles\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.988126 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/840b335b-2d57-4b47-a63b-affc070b618c-serving-cert\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:09 crc kubenswrapper[4948]: I1122 04:59:09.988565 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0825b3dd-f357-4bda-bee2-95e2761edd79-serving-cert\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:10 crc kubenswrapper[4948]: I1122 04:59:10.002056 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8mk2\" (UniqueName: \"kubernetes.io/projected/840b335b-2d57-4b47-a63b-affc070b618c-kube-api-access-t8mk2\") pod \"controller-manager-55994f8d78-fnxfz\" (UID: \"840b335b-2d57-4b47-a63b-affc070b618c\") " pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:10 crc kubenswrapper[4948]: I1122 04:59:10.010484 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zm8fh\" (UniqueName: \"kubernetes.io/projected/0825b3dd-f357-4bda-bee2-95e2761edd79-kube-api-access-zm8fh\") pod \"route-controller-manager-5cc69cd97f-69jst\" (UID: \"0825b3dd-f357-4bda-bee2-95e2761edd79\") " pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:10 crc kubenswrapper[4948]: I1122 04:59:10.135702 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:10 crc kubenswrapper[4948]: I1122 04:59:10.168860 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:10 crc kubenswrapper[4948]: I1122 04:59:10.325984 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55994f8d78-fnxfz"] Nov 22 04:59:10 crc kubenswrapper[4948]: I1122 04:59:10.447204 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst"] Nov 22 04:59:10 crc kubenswrapper[4948]: W1122 04:59:10.458425 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0825b3dd_f357_4bda_bee2_95e2761edd79.slice/crio-f18aedd3908f9f9e356215d63ace16f346eaf50f37f2598b7baa8e76b7c47589 WatchSource:0}: Error finding container f18aedd3908f9f9e356215d63ace16f346eaf50f37f2598b7baa8e76b7c47589: Status 404 returned error can't find the container with id f18aedd3908f9f9e356215d63ace16f346eaf50f37f2598b7baa8e76b7c47589 Nov 22 04:59:10 crc kubenswrapper[4948]: I1122 04:59:10.558811 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" event={"ID":"840b335b-2d57-4b47-a63b-affc070b618c","Type":"ContainerStarted","Data":"52f03ae8508eb7a6a160cc7c3005b3a369f7175a7d1146de8af4d5cc7967727e"} Nov 22 04:59:10 crc kubenswrapper[4948]: I1122 04:59:10.560078 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" event={"ID":"0825b3dd-f357-4bda-bee2-95e2761edd79","Type":"ContainerStarted","Data":"f18aedd3908f9f9e356215d63ace16f346eaf50f37f2598b7baa8e76b7c47589"} Nov 22 04:59:11 crc kubenswrapper[4948]: I1122 04:59:11.565543 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" event={"ID":"840b335b-2d57-4b47-a63b-affc070b618c","Type":"ContainerStarted","Data":"74946e377345c8661dce7dc3c089f9294c5bc3c89d9a9f7a60ae044b2f1e418e"} Nov 22 04:59:11 crc kubenswrapper[4948]: I1122 04:59:11.566071 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:11 crc kubenswrapper[4948]: I1122 04:59:11.567218 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" event={"ID":"0825b3dd-f357-4bda-bee2-95e2761edd79","Type":"ContainerStarted","Data":"e02bd6bc9f77dcaa9ca236e6409c3ebe1ce7fb6a9e9b5ffbe212bd00fba34874"} Nov 22 04:59:11 crc kubenswrapper[4948]: I1122 04:59:11.567523 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:11 crc kubenswrapper[4948]: I1122 04:59:11.570491 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" Nov 22 04:59:11 crc kubenswrapper[4948]: I1122 04:59:11.574257 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" Nov 22 04:59:11 crc kubenswrapper[4948]: I1122 04:59:11.585190 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-55994f8d78-fnxfz" podStartSLOduration=2.585176566 podStartE2EDuration="2.585176566s" 
podCreationTimestamp="2025-11-22 04:59:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:59:11.584309481 +0000 UTC m=+754.270320007" watchObservedRunningTime="2025-11-22 04:59:11.585176566 +0000 UTC m=+754.271187082" Nov 22 04:59:11 crc kubenswrapper[4948]: I1122 04:59:11.636040 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5cc69cd97f-69jst" podStartSLOduration=2.636023103 podStartE2EDuration="2.636023103s" podCreationTimestamp="2025-11-22 04:59:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 04:59:11.612416336 +0000 UTC m=+754.298426852" watchObservedRunningTime="2025-11-22 04:59:11.636023103 +0000 UTC m=+754.322033619" Nov 22 04:59:17 crc kubenswrapper[4948]: I1122 04:59:17.938299 4948 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 22 04:59:20 crc kubenswrapper[4948]: I1122 04:59:20.712585 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-789cf9d5c8-mhm7r" Nov 22 04:59:40 crc kubenswrapper[4948]: I1122 04:59:40.328555 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-c4c4f5766-cqwrs" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.048899 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb"] Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.049607 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.055514 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.055900 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-86hvb" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.065793 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-fnfgf"] Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.074714 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb"] Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.074845 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.078296 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.078587 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.135802 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-m2xpv"] Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.137798 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.143573 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.143856 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.143914 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-zkvnw" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.144046 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.147809 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-gdrxf"] Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.151642 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.154440 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.161731 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-gdrxf"] Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.230912 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2p85\" (UniqueName: \"kubernetes.io/projected/fd5e0eea-0522-4358-88af-b2b648549f7b-kube-api-access-n2p85\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.230973 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvdkd\" (UniqueName: \"kubernetes.io/projected/0d29ddea-72d4-4194-8050-8e302d8000a1-kube-api-access-mvdkd\") pod \"frr-k8s-webhook-server-6998585d5-d8hlb\" (UID: \"0d29ddea-72d4-4194-8050-8e302d8000a1\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.231004 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-metrics\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.231051 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-reloader\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.231093 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-frr-conf\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.231119 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd5e0eea-0522-4358-88af-b2b648549f7b-metrics-certs\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.231139 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0d29ddea-72d4-4194-8050-8e302d8000a1-cert\") pod \"frr-k8s-webhook-server-6998585d5-d8hlb\" (UID: \"0d29ddea-72d4-4194-8050-8e302d8000a1\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.231205 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-frr-sockets\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.231226 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/fd5e0eea-0522-4358-88af-b2b648549f7b-frr-startup\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.331978 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-memberlist\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332060 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2p85\" (UniqueName: \"kubernetes.io/projected/fd5e0eea-0522-4358-88af-b2b648549f7b-kube-api-access-n2p85\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332093 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvdkd\" (UniqueName: \"kubernetes.io/projected/0d29ddea-72d4-4194-8050-8e302d8000a1-kube-api-access-mvdkd\") pod \"frr-k8s-webhook-server-6998585d5-d8hlb\" (UID: \"0d29ddea-72d4-4194-8050-8e302d8000a1\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332116 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-metrics\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332144 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/049ab730-c768-4140-98aa-ac16df011ab1-cert\") pod \"controller-6c7b4b5f48-gdrxf\" (UID: \"049ab730-c768-4140-98aa-ac16df011ab1\") " pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332166 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: 
\"kubernetes.io/configmap/67c508f6-24a6-4683-96e1-e324b8a6f5b8-metallb-excludel2\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332185 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-reloader\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332215 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/049ab730-c768-4140-98aa-ac16df011ab1-metrics-certs\") pod \"controller-6c7b4b5f48-gdrxf\" (UID: \"049ab730-c768-4140-98aa-ac16df011ab1\") " pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332235 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-frr-conf\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332257 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwjq9\" (UniqueName: \"kubernetes.io/projected/67c508f6-24a6-4683-96e1-e324b8a6f5b8-kube-api-access-hwjq9\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332282 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd5e0eea-0522-4358-88af-b2b648549f7b-metrics-certs\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332302 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0d29ddea-72d4-4194-8050-8e302d8000a1-cert\") pod \"frr-k8s-webhook-server-6998585d5-d8hlb\" (UID: \"0d29ddea-72d4-4194-8050-8e302d8000a1\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332321 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmv6j\" (UniqueName: \"kubernetes.io/projected/049ab730-c768-4140-98aa-ac16df011ab1-kube-api-access-dmv6j\") pod \"controller-6c7b4b5f48-gdrxf\" (UID: \"049ab730-c768-4140-98aa-ac16df011ab1\") " pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332347 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-metrics-certs\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332378 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-frr-sockets\") pod \"frr-k8s-fnfgf\" (UID: 
\"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.332397 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/fd5e0eea-0522-4358-88af-b2b648549f7b-frr-startup\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: E1122 04:59:41.333307 4948 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 22 04:59:41 crc kubenswrapper[4948]: E1122 04:59:41.333362 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd5e0eea-0522-4358-88af-b2b648549f7b-metrics-certs podName:fd5e0eea-0522-4358-88af-b2b648549f7b nodeName:}" failed. No retries permitted until 2025-11-22 04:59:41.833344272 +0000 UTC m=+784.519354788 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fd5e0eea-0522-4358-88af-b2b648549f7b-metrics-certs") pod "frr-k8s-fnfgf" (UID: "fd5e0eea-0522-4358-88af-b2b648549f7b") : secret "frr-k8s-certs-secret" not found Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.333434 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/fd5e0eea-0522-4358-88af-b2b648549f7b-frr-startup\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.333559 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-metrics\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.333769 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-reloader\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.333870 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-frr-conf\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.333907 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/fd5e0eea-0522-4358-88af-b2b648549f7b-frr-sockets\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.341911 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0d29ddea-72d4-4194-8050-8e302d8000a1-cert\") pod \"frr-k8s-webhook-server-6998585d5-d8hlb\" (UID: \"0d29ddea-72d4-4194-8050-8e302d8000a1\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.356233 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvdkd\" (UniqueName: 
\"kubernetes.io/projected/0d29ddea-72d4-4194-8050-8e302d8000a1-kube-api-access-mvdkd\") pod \"frr-k8s-webhook-server-6998585d5-d8hlb\" (UID: \"0d29ddea-72d4-4194-8050-8e302d8000a1\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.362842 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2p85\" (UniqueName: \"kubernetes.io/projected/fd5e0eea-0522-4358-88af-b2b648549f7b-kube-api-access-n2p85\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.379759 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.433967 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/049ab730-c768-4140-98aa-ac16df011ab1-cert\") pod \"controller-6c7b4b5f48-gdrxf\" (UID: \"049ab730-c768-4140-98aa-ac16df011ab1\") " pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.434029 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/67c508f6-24a6-4683-96e1-e324b8a6f5b8-metallb-excludel2\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.434064 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/049ab730-c768-4140-98aa-ac16df011ab1-metrics-certs\") pod \"controller-6c7b4b5f48-gdrxf\" (UID: \"049ab730-c768-4140-98aa-ac16df011ab1\") " pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.434089 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwjq9\" (UniqueName: \"kubernetes.io/projected/67c508f6-24a6-4683-96e1-e324b8a6f5b8-kube-api-access-hwjq9\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.434132 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmv6j\" (UniqueName: \"kubernetes.io/projected/049ab730-c768-4140-98aa-ac16df011ab1-kube-api-access-dmv6j\") pod \"controller-6c7b4b5f48-gdrxf\" (UID: \"049ab730-c768-4140-98aa-ac16df011ab1\") " pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.434161 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-metrics-certs\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.434204 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-memberlist\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: E1122 04:59:41.434400 4948 secret.go:188] Couldn't get secret 
metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 04:59:41 crc kubenswrapper[4948]: E1122 04:59:41.434456 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-memberlist podName:67c508f6-24a6-4683-96e1-e324b8a6f5b8 nodeName:}" failed. No retries permitted until 2025-11-22 04:59:41.934437942 +0000 UTC m=+784.620448458 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-memberlist") pod "speaker-m2xpv" (UID: "67c508f6-24a6-4683-96e1-e324b8a6f5b8") : secret "metallb-memberlist" not found Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.435292 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/67c508f6-24a6-4683-96e1-e324b8a6f5b8-metallb-excludel2\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.436998 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.438525 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-metrics-certs\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.439268 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/049ab730-c768-4140-98aa-ac16df011ab1-metrics-certs\") pod \"controller-6c7b4b5f48-gdrxf\" (UID: \"049ab730-c768-4140-98aa-ac16df011ab1\") " pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.447199 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/049ab730-c768-4140-98aa-ac16df011ab1-cert\") pod \"controller-6c7b4b5f48-gdrxf\" (UID: \"049ab730-c768-4140-98aa-ac16df011ab1\") " pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.457885 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwjq9\" (UniqueName: \"kubernetes.io/projected/67c508f6-24a6-4683-96e1-e324b8a6f5b8-kube-api-access-hwjq9\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.458091 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmv6j\" (UniqueName: \"kubernetes.io/projected/049ab730-c768-4140-98aa-ac16df011ab1-kube-api-access-dmv6j\") pod \"controller-6c7b4b5f48-gdrxf\" (UID: \"049ab730-c768-4140-98aa-ac16df011ab1\") " pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.483680 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.818281 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb"] Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.839780 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd5e0eea-0522-4358-88af-b2b648549f7b-metrics-certs\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.843934 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd5e0eea-0522-4358-88af-b2b648549f7b-metrics-certs\") pod \"frr-k8s-fnfgf\" (UID: \"fd5e0eea-0522-4358-88af-b2b648549f7b\") " pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.883133 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-gdrxf"] Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.940775 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-memberlist\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:41 crc kubenswrapper[4948]: E1122 04:59:41.941042 4948 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 04:59:41 crc kubenswrapper[4948]: E1122 04:59:41.941127 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-memberlist podName:67c508f6-24a6-4683-96e1-e324b8a6f5b8 nodeName:}" failed. No retries permitted until 2025-11-22 04:59:42.941105834 +0000 UTC m=+785.627116360 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-memberlist") pod "speaker-m2xpv" (UID: "67c508f6-24a6-4683-96e1-e324b8a6f5b8") : secret "metallb-memberlist" not found Nov 22 04:59:41 crc kubenswrapper[4948]: I1122 04:59:41.996209 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:42 crc kubenswrapper[4948]: I1122 04:59:42.743489 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" event={"ID":"0d29ddea-72d4-4194-8050-8e302d8000a1","Type":"ContainerStarted","Data":"9fcaec506e89c4aad89e7e327d68a762bb264c1a998767119d6915fe10be071c"} Nov 22 04:59:42 crc kubenswrapper[4948]: I1122 04:59:42.744721 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerStarted","Data":"423a2bbf1c7abbff395a2a9592a86ced53cba22511ccd9a5ba7a673b8272c08f"} Nov 22 04:59:42 crc kubenswrapper[4948]: I1122 04:59:42.746079 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-gdrxf" event={"ID":"049ab730-c768-4140-98aa-ac16df011ab1","Type":"ContainerStarted","Data":"beecd84f77001fce54c8e25b0886f0a4a3d0dafbc867c8e8047c106207a86ac2"} Nov 22 04:59:42 crc kubenswrapper[4948]: I1122 04:59:42.746132 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-gdrxf" event={"ID":"049ab730-c768-4140-98aa-ac16df011ab1","Type":"ContainerStarted","Data":"2d4ce2db09035500e133a78f66ed86bb5767fff8e8a7faafb53c4219b2ba7a5f"} Nov 22 04:59:42 crc kubenswrapper[4948]: I1122 04:59:42.955158 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-memberlist\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:42 crc kubenswrapper[4948]: I1122 04:59:42.963656 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/67c508f6-24a6-4683-96e1-e324b8a6f5b8-memberlist\") pod \"speaker-m2xpv\" (UID: \"67c508f6-24a6-4683-96e1-e324b8a6f5b8\") " pod="metallb-system/speaker-m2xpv" Nov 22 04:59:43 crc kubenswrapper[4948]: I1122 04:59:43.262985 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-m2xpv" Nov 22 04:59:43 crc kubenswrapper[4948]: W1122 04:59:43.336186 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67c508f6_24a6_4683_96e1_e324b8a6f5b8.slice/crio-08489084340badcd3927f2cb11052f2cfd717cb59e20adf3cfa5c546f7ed336a WatchSource:0}: Error finding container 08489084340badcd3927f2cb11052f2cfd717cb59e20adf3cfa5c546f7ed336a: Status 404 returned error can't find the container with id 08489084340badcd3927f2cb11052f2cfd717cb59e20adf3cfa5c546f7ed336a Nov 22 04:59:43 crc kubenswrapper[4948]: I1122 04:59:43.795370 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-m2xpv" event={"ID":"67c508f6-24a6-4683-96e1-e324b8a6f5b8","Type":"ContainerStarted","Data":"eb9ebec4fbef448c1eb11fa09d82af3aa9d89ffd98d2aa5ca599676523ce43e6"} Nov 22 04:59:43 crc kubenswrapper[4948]: I1122 04:59:43.795718 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-m2xpv" event={"ID":"67c508f6-24a6-4683-96e1-e324b8a6f5b8","Type":"ContainerStarted","Data":"08489084340badcd3927f2cb11052f2cfd717cb59e20adf3cfa5c546f7ed336a"} Nov 22 04:59:46 crc kubenswrapper[4948]: I1122 04:59:46.821401 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-m2xpv" event={"ID":"67c508f6-24a6-4683-96e1-e324b8a6f5b8","Type":"ContainerStarted","Data":"9ec0f27eea060911117054bd5887cef3a61a83a43fc080f070b41558db2df31f"} Nov 22 04:59:46 crc kubenswrapper[4948]: I1122 04:59:46.821936 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-m2xpv" Nov 22 04:59:46 crc kubenswrapper[4948]: I1122 04:59:46.825254 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-gdrxf" event={"ID":"049ab730-c768-4140-98aa-ac16df011ab1","Type":"ContainerStarted","Data":"c6b499512f754a039dbbbfa1490e8c71a4a526d5c9cfefc25b482d460760ce07"} Nov 22 04:59:46 crc kubenswrapper[4948]: I1122 04:59:46.825402 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-gdrxf" Nov 22 04:59:46 crc kubenswrapper[4948]: I1122 04:59:46.840744 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-m2xpv" podStartSLOduration=3.548134155 podStartE2EDuration="5.840724751s" podCreationTimestamp="2025-11-22 04:59:41 +0000 UTC" firstStartedPulling="2025-11-22 04:59:43.628680804 +0000 UTC m=+786.314691320" lastFinishedPulling="2025-11-22 04:59:45.9212714 +0000 UTC m=+788.607281916" observedRunningTime="2025-11-22 04:59:46.836584153 +0000 UTC m=+789.522594669" watchObservedRunningTime="2025-11-22 04:59:46.840724751 +0000 UTC m=+789.526735277" Nov 22 04:59:46 crc kubenswrapper[4948]: I1122 04:59:46.859279 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-gdrxf" podStartSLOduration=1.974959311 podStartE2EDuration="5.859237234s" podCreationTimestamp="2025-11-22 04:59:41 +0000 UTC" firstStartedPulling="2025-11-22 04:59:42.030883374 +0000 UTC m=+784.716893890" lastFinishedPulling="2025-11-22 04:59:45.915161297 +0000 UTC m=+788.601171813" observedRunningTime="2025-11-22 04:59:46.854032007 +0000 UTC m=+789.540042523" watchObservedRunningTime="2025-11-22 04:59:46.859237234 +0000 UTC m=+789.545247750" Nov 22 04:59:49 crc kubenswrapper[4948]: I1122 04:59:49.841915 4948 generic.go:334] "Generic (PLEG): container finished" 
podID="fd5e0eea-0522-4358-88af-b2b648549f7b" containerID="aaf1f639d84e1810ddf16d9ce9177922b29e8220576468badf860e330d08bb30" exitCode=0 Nov 22 04:59:49 crc kubenswrapper[4948]: I1122 04:59:49.842156 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerDied","Data":"aaf1f639d84e1810ddf16d9ce9177922b29e8220576468badf860e330d08bb30"} Nov 22 04:59:49 crc kubenswrapper[4948]: I1122 04:59:49.844049 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" event={"ID":"0d29ddea-72d4-4194-8050-8e302d8000a1","Type":"ContainerStarted","Data":"3c36e3956d008de31f701290e661e019b24135429d7a9b130249dd64ccdd5304"} Nov 22 04:59:49 crc kubenswrapper[4948]: I1122 04:59:49.844182 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" Nov 22 04:59:49 crc kubenswrapper[4948]: I1122 04:59:49.887492 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb" podStartSLOduration=1.677798774 podStartE2EDuration="8.887460649s" podCreationTimestamp="2025-11-22 04:59:41 +0000 UTC" firstStartedPulling="2025-11-22 04:59:41.821102239 +0000 UTC m=+784.507112755" lastFinishedPulling="2025-11-22 04:59:49.030764114 +0000 UTC m=+791.716774630" observedRunningTime="2025-11-22 04:59:49.88572418 +0000 UTC m=+792.571734696" watchObservedRunningTime="2025-11-22 04:59:49.887460649 +0000 UTC m=+792.573471165" Nov 22 04:59:50 crc kubenswrapper[4948]: I1122 04:59:50.852948 4948 generic.go:334] "Generic (PLEG): container finished" podID="fd5e0eea-0522-4358-88af-b2b648549f7b" containerID="6bbb7b01079d10547ee381d772d4b869895183294ca2c950b41955e334db6b44" exitCode=0 Nov 22 04:59:50 crc kubenswrapper[4948]: I1122 04:59:50.853038 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerDied","Data":"6bbb7b01079d10547ee381d772d4b869895183294ca2c950b41955e334db6b44"} Nov 22 04:59:51 crc kubenswrapper[4948]: I1122 04:59:51.861592 4948 generic.go:334] "Generic (PLEG): container finished" podID="fd5e0eea-0522-4358-88af-b2b648549f7b" containerID="f77a948fd41334ccc6e8c355432e0ea85e102f6492dbf2224e5b0829b9bc1df6" exitCode=0 Nov 22 04:59:51 crc kubenswrapper[4948]: I1122 04:59:51.861769 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerDied","Data":"f77a948fd41334ccc6e8c355432e0ea85e102f6492dbf2224e5b0829b9bc1df6"} Nov 22 04:59:52 crc kubenswrapper[4948]: I1122 04:59:52.876729 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerStarted","Data":"9a700a9549ec6e364ecf7da2d7f7f2658fc5e7f1d38034d9eccdc6fab4663a7c"} Nov 22 04:59:52 crc kubenswrapper[4948]: I1122 04:59:52.877091 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerStarted","Data":"9c74f1c41750f5d3954c0eb916432eec954e56476251e6e07281af0c095ef53c"} Nov 22 04:59:52 crc kubenswrapper[4948]: I1122 04:59:52.877106 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" 
event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerStarted","Data":"d5e75ce095442ad290bb3fdc21fd4f062cc21a9f636644dc136d3197b3fe67c5"} Nov 22 04:59:52 crc kubenswrapper[4948]: I1122 04:59:52.877120 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerStarted","Data":"337dc875123ddb1864dbc574125c88a7f9abb48ae8e81eb432c505736046b94d"} Nov 22 04:59:52 crc kubenswrapper[4948]: I1122 04:59:52.877132 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerStarted","Data":"e4157832acb30580a83c4fae9eb8cef345a5c3dbb13a526d31942843336cf420"} Nov 22 04:59:53 crc kubenswrapper[4948]: I1122 04:59:53.270423 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-m2xpv" Nov 22 04:59:53 crc kubenswrapper[4948]: I1122 04:59:53.887584 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-fnfgf" event={"ID":"fd5e0eea-0522-4358-88af-b2b648549f7b","Type":"ContainerStarted","Data":"62065b61618818d47d213e84ed4ef59af9095917f77352e8f6311bae07141044"} Nov 22 04:59:53 crc kubenswrapper[4948]: I1122 04:59:53.887808 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-fnfgf" Nov 22 04:59:53 crc kubenswrapper[4948]: I1122 04:59:53.914279 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-fnfgf" podStartSLOduration=5.937771534 podStartE2EDuration="12.914255612s" podCreationTimestamp="2025-11-22 04:59:41 +0000 UTC" firstStartedPulling="2025-11-22 04:59:42.093995129 +0000 UTC m=+784.780005645" lastFinishedPulling="2025-11-22 04:59:49.070479207 +0000 UTC m=+791.756489723" observedRunningTime="2025-11-22 04:59:53.910192867 +0000 UTC m=+796.596203413" watchObservedRunningTime="2025-11-22 04:59:53.914255612 +0000 UTC m=+796.600266158" Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.571438 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhc5"] Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.572701 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.585400 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhc5"]
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.748930 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-utilities\") pod \"redhat-marketplace-2hhc5\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") " pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.748968 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42m2f\" (UniqueName: \"kubernetes.io/projected/44685581-0e0a-4d82-8a4a-4c69d64a896a-kube-api-access-42m2f\") pod \"redhat-marketplace-2hhc5\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") " pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.749022 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-catalog-content\") pod \"redhat-marketplace-2hhc5\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") " pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.857783 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-catalog-content\") pod \"redhat-marketplace-2hhc5\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") " pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.857975 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-utilities\") pod \"redhat-marketplace-2hhc5\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") " pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.858004 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42m2f\" (UniqueName: \"kubernetes.io/projected/44685581-0e0a-4d82-8a4a-4c69d64a896a-kube-api-access-42m2f\") pod \"redhat-marketplace-2hhc5\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") " pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.858788 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-catalog-content\") pod \"redhat-marketplace-2hhc5\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") " pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.859054 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-utilities\") pod \"redhat-marketplace-2hhc5\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") " pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.883392 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42m2f\" (UniqueName: \"kubernetes.io/projected/44685581-0e0a-4d82-8a4a-4c69d64a896a-kube-api-access-42m2f\") pod \"redhat-marketplace-2hhc5\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") " pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:55 crc kubenswrapper[4948]: I1122 04:59:55.889954 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 04:59:56 crc kubenswrapper[4948]: I1122 04:59:56.328937 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhc5"]
Nov 22 04:59:56 crc kubenswrapper[4948]: W1122 04:59:56.339611 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44685581_0e0a_4d82_8a4a_4c69d64a896a.slice/crio-d2208f9561c9c4d7b75841cc9cbb01b57a4e920b20747c11e41a08457513ab43 WatchSource:0}: Error finding container d2208f9561c9c4d7b75841cc9cbb01b57a4e920b20747c11e41a08457513ab43: Status 404 returned error can't find the container with id d2208f9561c9c4d7b75841cc9cbb01b57a4e920b20747c11e41a08457513ab43
Nov 22 04:59:56 crc kubenswrapper[4948]: I1122 04:59:56.907988 4948 generic.go:334] "Generic (PLEG): container finished" podID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerID="855065f634524646644727fa5ceb969ef21339e7f5ab3abef8a735e025aba8df" exitCode=0
Nov 22 04:59:56 crc kubenswrapper[4948]: I1122 04:59:56.908030 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhc5" event={"ID":"44685581-0e0a-4d82-8a4a-4c69d64a896a","Type":"ContainerDied","Data":"855065f634524646644727fa5ceb969ef21339e7f5ab3abef8a735e025aba8df"}
Nov 22 04:59:56 crc kubenswrapper[4948]: I1122 04:59:56.908055 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhc5" event={"ID":"44685581-0e0a-4d82-8a4a-4c69d64a896a","Type":"ContainerStarted","Data":"d2208f9561c9c4d7b75841cc9cbb01b57a4e920b20747c11e41a08457513ab43"}
Nov 22 04:59:56 crc kubenswrapper[4948]: I1122 04:59:56.997361 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-fnfgf"
Nov 22 04:59:57 crc kubenswrapper[4948]: I1122 04:59:57.035416 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-fnfgf"
Nov 22 04:59:58 crc kubenswrapper[4948]: I1122 04:59:58.924370 4948 generic.go:334] "Generic (PLEG): container finished" podID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerID="c6af0718dc59123a11cd1ed6976c827c31df3b6f8a0e243cd3dae89d005c3148" exitCode=0
Nov 22 04:59:58 crc kubenswrapper[4948]: I1122 04:59:58.924409 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhc5" event={"ID":"44685581-0e0a-4d82-8a4a-4c69d64a896a","Type":"ContainerDied","Data":"c6af0718dc59123a11cd1ed6976c827c31df3b6f8a0e243cd3dae89d005c3148"}
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.154494 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"]
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.160901 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"]
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.160999 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.163954 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.164151 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.313676 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vz6fz\" (UniqueName: \"kubernetes.io/projected/51fc3a75-3df9-4476-978b-2284727b0eca-kube-api-access-vz6fz\") pod \"collect-profiles-29396460-4867x\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.313753 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/51fc3a75-3df9-4476-978b-2284727b0eca-secret-volume\") pod \"collect-profiles-29396460-4867x\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.313775 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/51fc3a75-3df9-4476-978b-2284727b0eca-config-volume\") pod \"collect-profiles-29396460-4867x\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.415092 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vz6fz\" (UniqueName: \"kubernetes.io/projected/51fc3a75-3df9-4476-978b-2284727b0eca-kube-api-access-vz6fz\") pod \"collect-profiles-29396460-4867x\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.415155 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/51fc3a75-3df9-4476-978b-2284727b0eca-secret-volume\") pod \"collect-profiles-29396460-4867x\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.415183 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/51fc3a75-3df9-4476-978b-2284727b0eca-config-volume\") pod \"collect-profiles-29396460-4867x\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.416175 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/51fc3a75-3df9-4476-978b-2284727b0eca-config-volume\") pod \"collect-profiles-29396460-4867x\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.420092 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/51fc3a75-3df9-4476-978b-2284727b0eca-secret-volume\") pod \"collect-profiles-29396460-4867x\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.436674 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vz6fz\" (UniqueName: \"kubernetes.io/projected/51fc3a75-3df9-4476-978b-2284727b0eca-kube-api-access-vz6fz\") pod \"collect-profiles-29396460-4867x\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.490868 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.909114 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"]
Nov 22 05:00:00 crc kubenswrapper[4948]: W1122 05:00:00.920432 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51fc3a75_3df9_4476_978b_2284727b0eca.slice/crio-a392dc25350b8edabcba94f5aa865b672f2e16875ce994373f57770781fbded0 WatchSource:0}: Error finding container a392dc25350b8edabcba94f5aa865b672f2e16875ce994373f57770781fbded0: Status 404 returned error can't find the container with id a392dc25350b8edabcba94f5aa865b672f2e16875ce994373f57770781fbded0
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.947024 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhc5" event={"ID":"44685581-0e0a-4d82-8a4a-4c69d64a896a","Type":"ContainerStarted","Data":"e97a1a24523fc3c763268f7d2c5d08bbdc44dba0279b0919f2cf1ad99c3c55cf"}
Nov 22 05:00:00 crc kubenswrapper[4948]: I1122 05:00:00.949103 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x" event={"ID":"51fc3a75-3df9-4476-978b-2284727b0eca","Type":"ContainerStarted","Data":"a392dc25350b8edabcba94f5aa865b672f2e16875ce994373f57770781fbded0"}
Nov 22 05:00:01 crc kubenswrapper[4948]: I1122 05:00:01.388151 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-d8hlb"
Nov 22 05:00:01 crc kubenswrapper[4948]: I1122 05:00:01.402609 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2hhc5" podStartSLOduration=3.448505861 podStartE2EDuration="6.402583638s" podCreationTimestamp="2025-11-22 04:59:55 +0000 UTC" firstStartedPulling="2025-11-22 04:59:56.909774312 +0000 UTC m=+799.595784828" lastFinishedPulling="2025-11-22 04:59:59.863852079 +0000 UTC m=+802.549862605" observedRunningTime="2025-11-22 05:00:00.972701147 +0000 UTC m=+803.658711673" watchObservedRunningTime="2025-11-22 05:00:01.402583638 +0000 UTC m=+804.088594194"
Nov 22 05:00:01 crc kubenswrapper[4948]: I1122 05:00:01.488018 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-gdrxf"
Nov 22 05:00:01 crc kubenswrapper[4948]: I1122 05:00:01.957128 4948 generic.go:334] "Generic (PLEG): container finished" podID="51fc3a75-3df9-4476-978b-2284727b0eca" containerID="db23e43353a30cafc6b5c329420bf9d28bc0dec22d7508cfe8c638275d917d1f" exitCode=0
Nov 22 05:00:01 crc kubenswrapper[4948]: I1122 05:00:01.957219 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x" event={"ID":"51fc3a75-3df9-4476-978b-2284727b0eca","Type":"ContainerDied","Data":"db23e43353a30cafc6b5c329420bf9d28bc0dec22d7508cfe8c638275d917d1f"}
Nov 22 05:00:02 crc kubenswrapper[4948]: I1122 05:00:02.754084 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-l6n8g"]
Nov 22 05:00:02 crc kubenswrapper[4948]: I1122 05:00:02.755615 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-l6n8g"
Nov 22 05:00:02 crc kubenswrapper[4948]: I1122 05:00:02.757482 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-index-dockercfg-kdlcf"
Nov 22 05:00:02 crc kubenswrapper[4948]: I1122 05:00:02.757537 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 22 05:00:02 crc kubenswrapper[4948]: I1122 05:00:02.757854 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 22 05:00:02 crc kubenswrapper[4948]: I1122 05:00:02.776927 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-l6n8g"]
Nov 22 05:00:02 crc kubenswrapper[4948]: I1122 05:00:02.855728 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxx64\" (UniqueName: \"kubernetes.io/projected/1937b1f2-7af6-41cd-8a3d-79d83fe3371d-kube-api-access-gxx64\") pod \"mariadb-operator-index-l6n8g\" (UID: \"1937b1f2-7af6-41cd-8a3d-79d83fe3371d\") " pod="openstack-operators/mariadb-operator-index-l6n8g"
Nov 22 05:00:02 crc kubenswrapper[4948]: I1122 05:00:02.957084 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxx64\" (UniqueName: \"kubernetes.io/projected/1937b1f2-7af6-41cd-8a3d-79d83fe3371d-kube-api-access-gxx64\") pod \"mariadb-operator-index-l6n8g\" (UID: \"1937b1f2-7af6-41cd-8a3d-79d83fe3371d\") " pod="openstack-operators/mariadb-operator-index-l6n8g"
Nov 22 05:00:02 crc kubenswrapper[4948]: I1122 05:00:02.986111 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxx64\" (UniqueName: \"kubernetes.io/projected/1937b1f2-7af6-41cd-8a3d-79d83fe3371d-kube-api-access-gxx64\") pod \"mariadb-operator-index-l6n8g\" (UID: \"1937b1f2-7af6-41cd-8a3d-79d83fe3371d\") " pod="openstack-operators/mariadb-operator-index-l6n8g"
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.075248 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-l6n8g"
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.239352 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.282396 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-l6n8g"]
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.362724 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/51fc3a75-3df9-4476-978b-2284727b0eca-config-volume\") pod \"51fc3a75-3df9-4476-978b-2284727b0eca\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") "
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.362772 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/51fc3a75-3df9-4476-978b-2284727b0eca-secret-volume\") pod \"51fc3a75-3df9-4476-978b-2284727b0eca\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") "
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.362843 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vz6fz\" (UniqueName: \"kubernetes.io/projected/51fc3a75-3df9-4476-978b-2284727b0eca-kube-api-access-vz6fz\") pod \"51fc3a75-3df9-4476-978b-2284727b0eca\" (UID: \"51fc3a75-3df9-4476-978b-2284727b0eca\") "
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.363875 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51fc3a75-3df9-4476-978b-2284727b0eca-config-volume" (OuterVolumeSpecName: "config-volume") pod "51fc3a75-3df9-4476-978b-2284727b0eca" (UID: "51fc3a75-3df9-4476-978b-2284727b0eca"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.368074 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51fc3a75-3df9-4476-978b-2284727b0eca-kube-api-access-vz6fz" (OuterVolumeSpecName: "kube-api-access-vz6fz") pod "51fc3a75-3df9-4476-978b-2284727b0eca" (UID: "51fc3a75-3df9-4476-978b-2284727b0eca"). InnerVolumeSpecName "kube-api-access-vz6fz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.368275 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51fc3a75-3df9-4476-978b-2284727b0eca-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "51fc3a75-3df9-4476-978b-2284727b0eca" (UID: "51fc3a75-3df9-4476-978b-2284727b0eca"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.464803 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/51fc3a75-3df9-4476-978b-2284727b0eca-config-volume\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.464850 4948 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/51fc3a75-3df9-4476-978b-2284727b0eca-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.464867 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vz6fz\" (UniqueName: \"kubernetes.io/projected/51fc3a75-3df9-4476-978b-2284727b0eca-kube-api-access-vz6fz\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.972956 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x" event={"ID":"51fc3a75-3df9-4476-978b-2284727b0eca","Type":"ContainerDied","Data":"a392dc25350b8edabcba94f5aa865b672f2e16875ce994373f57770781fbded0"}
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.973017 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a392dc25350b8edabcba94f5aa865b672f2e16875ce994373f57770781fbded0"
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.972989 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396460-4867x"
Nov 22 05:00:03 crc kubenswrapper[4948]: I1122 05:00:03.974453 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-l6n8g" event={"ID":"1937b1f2-7af6-41cd-8a3d-79d83fe3371d","Type":"ContainerStarted","Data":"51a1e15d1dd5578d60c38fde55bf005f2ad5f967c858d831b1e670c56c1dd9f0"}
Nov 22 05:00:05 crc kubenswrapper[4948]: I1122 05:00:05.890379 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 05:00:05 crc kubenswrapper[4948]: I1122 05:00:05.891110 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 05:00:05 crc kubenswrapper[4948]: I1122 05:00:05.942299 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 05:00:06 crc kubenswrapper[4948]: I1122 05:00:06.038537 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 05:00:07 crc kubenswrapper[4948]: I1122 05:00:07.547435 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-l6n8g"]
Nov 22 05:00:08 crc kubenswrapper[4948]: I1122 05:00:08.173431 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-pntvg"]
Nov 22 05:00:08 crc kubenswrapper[4948]: E1122 05:00:08.174648 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51fc3a75-3df9-4476-978b-2284727b0eca" containerName="collect-profiles"
Nov 22 05:00:08 crc kubenswrapper[4948]: I1122 05:00:08.174681 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="51fc3a75-3df9-4476-978b-2284727b0eca" containerName="collect-profiles"
Nov 22 05:00:08 crc kubenswrapper[4948]: I1122 05:00:08.175061 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="51fc3a75-3df9-4476-978b-2284727b0eca" containerName="collect-profiles"
Nov 22 05:00:08 crc kubenswrapper[4948]: I1122 05:00:08.176777 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-pntvg"
Nov 22 05:00:08 crc kubenswrapper[4948]: I1122 05:00:08.182521 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-pntvg"]
Nov 22 05:00:08 crc kubenswrapper[4948]: I1122 05:00:08.261510 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tn6gg\" (UniqueName: \"kubernetes.io/projected/71947045-a3b7-4dca-a2e6-c421d6328bc1-kube-api-access-tn6gg\") pod \"mariadb-operator-index-pntvg\" (UID: \"71947045-a3b7-4dca-a2e6-c421d6328bc1\") " pod="openstack-operators/mariadb-operator-index-pntvg"
Nov 22 05:00:08 crc kubenswrapper[4948]: I1122 05:00:08.363058 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tn6gg\" (UniqueName: \"kubernetes.io/projected/71947045-a3b7-4dca-a2e6-c421d6328bc1-kube-api-access-tn6gg\") pod \"mariadb-operator-index-pntvg\" (UID: \"71947045-a3b7-4dca-a2e6-c421d6328bc1\") " pod="openstack-operators/mariadb-operator-index-pntvg"
Nov 22 05:00:08 crc kubenswrapper[4948]: I1122 05:00:08.395083 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tn6gg\" (UniqueName: \"kubernetes.io/projected/71947045-a3b7-4dca-a2e6-c421d6328bc1-kube-api-access-tn6gg\") pod \"mariadb-operator-index-pntvg\" (UID: \"71947045-a3b7-4dca-a2e6-c421d6328bc1\") " pod="openstack-operators/mariadb-operator-index-pntvg"
Nov 22 05:00:08 crc kubenswrapper[4948]: I1122 05:00:08.498553 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-pntvg"
Nov 22 05:00:09 crc kubenswrapper[4948]: I1122 05:00:09.751988 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhc5"]
Nov 22 05:00:09 crc kubenswrapper[4948]: I1122 05:00:09.752207 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2hhc5" podUID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerName="registry-server" containerID="cri-o://e97a1a24523fc3c763268f7d2c5d08bbdc44dba0279b0919f2cf1ad99c3c55cf" gracePeriod=2
Nov 22 05:00:10 crc kubenswrapper[4948]: I1122 05:00:10.013351 4948 generic.go:334] "Generic (PLEG): container finished" podID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerID="e97a1a24523fc3c763268f7d2c5d08bbdc44dba0279b0919f2cf1ad99c3c55cf" exitCode=0
Nov 22 05:00:10 crc kubenswrapper[4948]: I1122 05:00:10.013389 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhc5" event={"ID":"44685581-0e0a-4d82-8a4a-4c69d64a896a","Type":"ContainerDied","Data":"e97a1a24523fc3c763268f7d2c5d08bbdc44dba0279b0919f2cf1ad99c3c55cf"}
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.247856 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.406331 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42m2f\" (UniqueName: \"kubernetes.io/projected/44685581-0e0a-4d82-8a4a-4c69d64a896a-kube-api-access-42m2f\") pod \"44685581-0e0a-4d82-8a4a-4c69d64a896a\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") "
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.406931 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-utilities\") pod \"44685581-0e0a-4d82-8a4a-4c69d64a896a\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") "
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.407094 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-catalog-content\") pod \"44685581-0e0a-4d82-8a4a-4c69d64a896a\" (UID: \"44685581-0e0a-4d82-8a4a-4c69d64a896a\") "
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.407954 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-utilities" (OuterVolumeSpecName: "utilities") pod "44685581-0e0a-4d82-8a4a-4c69d64a896a" (UID: "44685581-0e0a-4d82-8a4a-4c69d64a896a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.411233 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44685581-0e0a-4d82-8a4a-4c69d64a896a-kube-api-access-42m2f" (OuterVolumeSpecName: "kube-api-access-42m2f") pod "44685581-0e0a-4d82-8a4a-4c69d64a896a" (UID: "44685581-0e0a-4d82-8a4a-4c69d64a896a"). InnerVolumeSpecName "kube-api-access-42m2f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.424429 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "44685581-0e0a-4d82-8a4a-4c69d64a896a" (UID: "44685581-0e0a-4d82-8a4a-4c69d64a896a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.507933 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.507973 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42m2f\" (UniqueName: \"kubernetes.io/projected/44685581-0e0a-4d82-8a4a-4c69d64a896a-kube-api-access-42m2f\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.507988 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44685581-0e0a-4d82-8a4a-4c69d64a896a-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:11 crc kubenswrapper[4948]: I1122 05:00:11.565755 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-pntvg"]
Nov 22 05:00:11 crc kubenswrapper[4948]: W1122 05:00:11.572871 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71947045_a3b7_4dca_a2e6_c421d6328bc1.slice/crio-887d0a0145266ef03fe8d7aa1961265f5a8444674f3de8731cd3f55da6e9c6d8 WatchSource:0}: Error finding container 887d0a0145266ef03fe8d7aa1961265f5a8444674f3de8731cd3f55da6e9c6d8: Status 404 returned error can't find the container with id 887d0a0145266ef03fe8d7aa1961265f5a8444674f3de8731cd3f55da6e9c6d8
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.000169 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-fnfgf"
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.029532 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-pntvg" event={"ID":"71947045-a3b7-4dca-a2e6-c421d6328bc1","Type":"ContainerStarted","Data":"887d0a0145266ef03fe8d7aa1961265f5a8444674f3de8731cd3f55da6e9c6d8"}
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.031702 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-l6n8g" event={"ID":"1937b1f2-7af6-41cd-8a3d-79d83fe3371d","Type":"ContainerStarted","Data":"74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599"}
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.031940 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-index-l6n8g" podUID="1937b1f2-7af6-41cd-8a3d-79d83fe3371d" containerName="registry-server" containerID="cri-o://74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599" gracePeriod=2
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.037033 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2hhc5" event={"ID":"44685581-0e0a-4d82-8a4a-4c69d64a896a","Type":"ContainerDied","Data":"d2208f9561c9c4d7b75841cc9cbb01b57a4e920b20747c11e41a08457513ab43"}
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.037105 4948 scope.go:117] "RemoveContainer" containerID="e97a1a24523fc3c763268f7d2c5d08bbdc44dba0279b0919f2cf1ad99c3c55cf"
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.037128 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2hhc5"
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.054709 4948 scope.go:117] "RemoveContainer" containerID="c6af0718dc59123a11cd1ed6976c827c31df3b6f8a0e243cd3dae89d005c3148"
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.068640 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-l6n8g" podStartSLOduration=2.040263215 podStartE2EDuration="10.068624448s" podCreationTimestamp="2025-11-22 05:00:02 +0000 UTC" firstStartedPulling="2025-11-22 05:00:03.291863444 +0000 UTC m=+805.977873970" lastFinishedPulling="2025-11-22 05:00:11.320224687 +0000 UTC m=+814.006235203" observedRunningTime="2025-11-22 05:00:12.057146354 +0000 UTC m=+814.743156870" watchObservedRunningTime="2025-11-22 05:00:12.068624448 +0000 UTC m=+814.754634964"
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.069968 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhc5"]
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.074738 4948 scope.go:117] "RemoveContainer" containerID="855065f634524646644727fa5ceb969ef21339e7f5ab3abef8a735e025aba8df"
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.078450 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2hhc5"]
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.386146 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-l6n8g"
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.518702 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxx64\" (UniqueName: \"kubernetes.io/projected/1937b1f2-7af6-41cd-8a3d-79d83fe3371d-kube-api-access-gxx64\") pod \"1937b1f2-7af6-41cd-8a3d-79d83fe3371d\" (UID: \"1937b1f2-7af6-41cd-8a3d-79d83fe3371d\") "
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.523715 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1937b1f2-7af6-41cd-8a3d-79d83fe3371d-kube-api-access-gxx64" (OuterVolumeSpecName: "kube-api-access-gxx64") pod "1937b1f2-7af6-41cd-8a3d-79d83fe3371d" (UID: "1937b1f2-7af6-41cd-8a3d-79d83fe3371d"). InnerVolumeSpecName "kube-api-access-gxx64". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 05:00:12 crc kubenswrapper[4948]: I1122 05:00:12.620146 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxx64\" (UniqueName: \"kubernetes.io/projected/1937b1f2-7af6-41cd-8a3d-79d83fe3371d-kube-api-access-gxx64\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.045408 4948 generic.go:334] "Generic (PLEG): container finished" podID="1937b1f2-7af6-41cd-8a3d-79d83fe3371d" containerID="74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599" exitCode=0
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.045485 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-l6n8g"
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.045527 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-l6n8g" event={"ID":"1937b1f2-7af6-41cd-8a3d-79d83fe3371d","Type":"ContainerDied","Data":"74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599"}
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.045908 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-l6n8g" event={"ID":"1937b1f2-7af6-41cd-8a3d-79d83fe3371d","Type":"ContainerDied","Data":"51a1e15d1dd5578d60c38fde55bf005f2ad5f967c858d831b1e670c56c1dd9f0"}
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.045933 4948 scope.go:117] "RemoveContainer" containerID="74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599"
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.070009 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-l6n8g"]
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.074164 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/mariadb-operator-index-l6n8g"]
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.089714 4948 scope.go:117] "RemoveContainer" containerID="74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599"
Nov 22 05:00:13 crc kubenswrapper[4948]: E1122 05:00:13.090156 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599\": container with ID starting with 74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599 not found: ID does not exist" containerID="74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599"
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.090202 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599"} err="failed to get container status \"74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599\": rpc error: code = NotFound desc = could not find container \"74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599\": container with ID starting with 74710ce06823ad1b0ff74d27ed816fc68a84dd7ac9fe34cf67aca0dc949d5599 not found: ID does not exist"
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.766147 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1937b1f2-7af6-41cd-8a3d-79d83fe3371d" path="/var/lib/kubelet/pods/1937b1f2-7af6-41cd-8a3d-79d83fe3371d/volumes"
Nov 22 05:00:13 crc kubenswrapper[4948]: I1122 05:00:13.766979 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44685581-0e0a-4d82-8a4a-4c69d64a896a" path="/var/lib/kubelet/pods/44685581-0e0a-4d82-8a4a-4c69d64a896a/volumes"
Nov 22 05:00:14 crc kubenswrapper[4948]: I1122 05:00:14.056238 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-pntvg" event={"ID":"71947045-a3b7-4dca-a2e6-c421d6328bc1","Type":"ContainerStarted","Data":"8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154"}
Nov 22 05:00:14 crc kubenswrapper[4948]: I1122 05:00:14.083438 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-pntvg" podStartSLOduration=4.440366104 podStartE2EDuration="6.083414274s" podCreationTimestamp="2025-11-22 05:00:08 +0000 UTC" firstStartedPulling="2025-11-22 05:00:11.576649391 +0000 UTC m=+814.262659907" lastFinishedPulling="2025-11-22 05:00:13.219697561 +0000 UTC m=+815.905708077" observedRunningTime="2025-11-22 05:00:14.081326635 +0000 UTC m=+816.767337201" watchObservedRunningTime="2025-11-22 05:00:14.083414274 +0000 UTC m=+816.769424810"
Nov 22 05:00:18 crc kubenswrapper[4948]: I1122 05:00:18.499936 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-index-pntvg"
Nov 22 05:00:18 crc kubenswrapper[4948]: I1122 05:00:18.500261 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/mariadb-operator-index-pntvg"
Nov 22 05:00:18 crc kubenswrapper[4948]: I1122 05:00:18.530606 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/mariadb-operator-index-pntvg"
Nov 22 05:00:19 crc kubenswrapper[4948]: I1122 05:00:19.141626 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-index-pntvg"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.392652 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"]
Nov 22 05:00:20 crc kubenswrapper[4948]: E1122 05:00:20.392896 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerName="registry-server"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.392910 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerName="registry-server"
Nov 22 05:00:20 crc kubenswrapper[4948]: E1122 05:00:20.392930 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerName="extract-utilities"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.392940 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerName="extract-utilities"
Nov 22 05:00:20 crc kubenswrapper[4948]: E1122 05:00:20.392950 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerName="extract-content"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.392959 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerName="extract-content"
Nov 22 05:00:20 crc kubenswrapper[4948]: E1122 05:00:20.392968 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1937b1f2-7af6-41cd-8a3d-79d83fe3371d" containerName="registry-server"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.392975 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="1937b1f2-7af6-41cd-8a3d-79d83fe3371d" containerName="registry-server"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.393075 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="44685581-0e0a-4d82-8a4a-4c69d64a896a" containerName="registry-server"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.393087 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="1937b1f2-7af6-41cd-8a3d-79d83fe3371d" containerName="registry-server"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.393967 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.400351 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-77z46"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.404151 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"]
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.531311 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hptm2\" (UniqueName: \"kubernetes.io/projected/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-kube-api-access-hptm2\") pod \"798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") " pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.531389 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-bundle\") pod \"798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") " pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.531444 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-util\") pod \"798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") " pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.633245 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-util\") pod \"798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") " pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.633341 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hptm2\" (UniqueName: \"kubernetes.io/projected/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-kube-api-access-hptm2\") pod \"798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") " pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.633378 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-bundle\") pod \"798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") " pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.633945 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-bundle\") pod \"798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") " pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.633942 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-util\") pod \"798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") " pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.651883 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hptm2\" (UniqueName: \"kubernetes.io/projected/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-kube-api-access-hptm2\") pod \"798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") " pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:20 crc kubenswrapper[4948]: I1122 05:00:20.718859 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:21 crc kubenswrapper[4948]: I1122 05:00:21.178256 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"]
Nov 22 05:00:22 crc kubenswrapper[4948]: I1122 05:00:22.132524 4948 generic.go:334] "Generic (PLEG): container finished" podID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerID="8b0bdf16d7effd19a699ab370e963e192a8e042fa9c96b0668e63d8808984c90" exitCode=0
Nov 22 05:00:22 crc kubenswrapper[4948]: I1122 05:00:22.132610 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf" event={"ID":"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2","Type":"ContainerDied","Data":"8b0bdf16d7effd19a699ab370e963e192a8e042fa9c96b0668e63d8808984c90"}
Nov 22 05:00:22 crc kubenswrapper[4948]: I1122 05:00:22.132667 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf" event={"ID":"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2","Type":"ContainerStarted","Data":"5b49dab8b4eb431150d7853d0f182052cd139bbbd8dfc21ec3d896392f471ab9"}
Nov 22 05:00:24 crc kubenswrapper[4948]: I1122 05:00:24.146250 4948 generic.go:334] "Generic (PLEG): container finished" podID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerID="d84d48cb390546092b493267142c741b016bdebd8d244445b8106b8aeb971361" exitCode=0
Nov 22 05:00:24 crc kubenswrapper[4948]: I1122 05:00:24.146314 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf" event={"ID":"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2","Type":"ContainerDied","Data":"d84d48cb390546092b493267142c741b016bdebd8d244445b8106b8aeb971361"}
Nov 22 05:00:25 crc kubenswrapper[4948]: I1122 05:00:25.160074 4948 generic.go:334] "Generic (PLEG): container finished" podID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerID="d6275a7e4a6def06541aaa05ec7304000dc52dbfb86333c23ebe7adddfe0d3ac" exitCode=0
Nov 22 05:00:25 crc kubenswrapper[4948]: I1122 05:00:25.160137 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf" event={"ID":"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2","Type":"ContainerDied","Data":"d6275a7e4a6def06541aaa05ec7304000dc52dbfb86333c23ebe7adddfe0d3ac"}
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.462907 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.522819 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-util\") pod \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") "
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.522909 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hptm2\" (UniqueName: \"kubernetes.io/projected/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-kube-api-access-hptm2\") pod \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") "
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.523043 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-bundle\") pod \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\" (UID: \"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2\") "
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.523874 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-bundle" (OuterVolumeSpecName: "bundle") pod "222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" (UID: "222cae05-41c0-4d5c-bdd8-2d8f682cf4d2"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.529911 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-kube-api-access-hptm2" (OuterVolumeSpecName: "kube-api-access-hptm2") pod "222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" (UID: "222cae05-41c0-4d5c-bdd8-2d8f682cf4d2"). InnerVolumeSpecName "kube-api-access-hptm2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.536353 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-util" (OuterVolumeSpecName: "util") pod "222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" (UID: "222cae05-41c0-4d5c-bdd8-2d8f682cf4d2"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.625087 4948 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.625149 4948 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-util\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:26 crc kubenswrapper[4948]: I1122 05:00:26.625177 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hptm2\" (UniqueName: \"kubernetes.io/projected/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2-kube-api-access-hptm2\") on node \"crc\" DevicePath \"\""
Nov 22 05:00:27 crc kubenswrapper[4948]: I1122 05:00:27.171677 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf" event={"ID":"222cae05-41c0-4d5c-bdd8-2d8f682cf4d2","Type":"ContainerDied","Data":"5b49dab8b4eb431150d7853d0f182052cd139bbbd8dfc21ec3d896392f471ab9"}
Nov 22 05:00:27 crc kubenswrapper[4948]: I1122 05:00:27.171723 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b49dab8b4eb431150d7853d0f182052cd139bbbd8dfc21ec3d896392f471ab9"
Nov 22 05:00:27 crc kubenswrapper[4948]: I1122 05:00:27.171725 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"
Nov 22 05:00:29 crc kubenswrapper[4948]: I1122 05:00:29.790202 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 05:00:29 crc kubenswrapper[4948]: I1122 05:00:29.790612 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.917647 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"]
Nov 22 05:00:33 crc kubenswrapper[4948]: E1122 05:00:33.918264 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerName="util"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.918280 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerName="util"
Nov 22 05:00:33 crc kubenswrapper[4948]: E1122 05:00:33.918290 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerName="pull"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.918296 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerName="pull"
Nov 22 05:00:33 crc kubenswrapper[4948]: E1122 05:00:33.918304 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerName="extract"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.918310 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerName="extract"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.918409 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" containerName="extract"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.918961 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.921213 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.921347 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-service-cert"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.922027 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-tnb2n"
Nov 22 05:00:33 crc kubenswrapper[4948]: I1122 05:00:33.941995 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"]
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.016300 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7zqm\" (UniqueName: \"kubernetes.io/projected/f4b54cb5-1843-4f85-abbb-37274329a537-kube-api-access-t7zqm\") pod \"mariadb-operator-controller-manager-74445689fd-8dbqq\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.016602 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-webhook-cert\") pod \"mariadb-operator-controller-manager-74445689fd-8dbqq\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.016632 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-apiservice-cert\") pod \"mariadb-operator-controller-manager-74445689fd-8dbqq\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.117779 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7zqm\" (UniqueName: \"kubernetes.io/projected/f4b54cb5-1843-4f85-abbb-37274329a537-kube-api-access-t7zqm\") pod \"mariadb-operator-controller-manager-74445689fd-8dbqq\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.117922 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-webhook-cert\") pod \"mariadb-operator-controller-manager-74445689fd-8dbqq\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.117959 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-apiservice-cert\") pod \"mariadb-operator-controller-manager-74445689fd-8dbqq\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.123406 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-webhook-cert\") pod \"mariadb-operator-controller-manager-74445689fd-8dbqq\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.124190 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-apiservice-cert\") pod \"mariadb-operator-controller-manager-74445689fd-8dbqq\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.132617 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7zqm\" (UniqueName: \"kubernetes.io/projected/f4b54cb5-1843-4f85-abbb-37274329a537-kube-api-access-t7zqm\") pod \"mariadb-operator-controller-manager-74445689fd-8dbqq\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.272164 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"
Nov 22 05:00:34 crc kubenswrapper[4948]: I1122 05:00:34.726645 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"]
Nov 22 05:00:35 crc kubenswrapper[4948]: I1122 05:00:35.223818 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" event={"ID":"f4b54cb5-1843-4f85-abbb-37274329a537","Type":"ContainerStarted","Data":"20049e7f46f68341995f40792be2fa9e65f2a7cd64e7dc2503d3d01f4231579e"}
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.246482 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tftj9"]
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.248331 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.261179 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tftj9"]
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.319936 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-catalog-content\") pod \"redhat-operators-tftj9\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.320002 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-utilities\") pod \"redhat-operators-tftj9\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.320038 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njffj\" (UniqueName: \"kubernetes.io/projected/831fb91c-781d-4248-8c8f-a23e2d3a846d-kube-api-access-njffj\") pod \"redhat-operators-tftj9\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.421214 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-utilities\") pod \"redhat-operators-tftj9\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.421309 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njffj\" (UniqueName: \"kubernetes.io/projected/831fb91c-781d-4248-8c8f-a23e2d3a846d-kube-api-access-njffj\") pod \"redhat-operators-tftj9\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.421380 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-catalog-content\") pod \"redhat-operators-tftj9\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.422197 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-catalog-content\") pod \"redhat-operators-tftj9\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.422364 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-utilities\") pod \"redhat-operators-tftj9\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.457975 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njffj\" (UniqueName: \"kubernetes.io/projected/831fb91c-781d-4248-8c8f-a23e2d3a846d-kube-api-access-njffj\") pod \"redhat-operators-tftj9\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:39 crc kubenswrapper[4948]: I1122 05:00:39.568381 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tftj9"
Nov 22 05:00:56 crc kubenswrapper[4948]: E1122 05:00:56.655211 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:2c4fe20e044dd8ea1f60f2f3f5e3844d932b4b79439835bd8771c73f16b38312"
Nov 22 05:00:56 crc kubenswrapper[4948]: E1122 05:00:56.655858 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:2c4fe20e044dd8ea1f60f2f3f5e3844d932b4b79439835bd8771c73f16b38312,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:webhook-server,HostPort:0,ContainerPort:9443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:d0aee837871a9ef85dba47cac91188541681de2d6f000759f7b6d9658de0b2e0,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:mariadb-operator.v0.0.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{268435456 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t7zqm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,}
start failed in pod mariadb-operator-controller-manager-74445689fd-8dbqq_openstack-operators(f4b54cb5-1843-4f85-abbb-37274329a537): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 05:00:56 crc kubenswrapper[4948]: I1122 05:00:56.885997 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tftj9"] Nov 22 05:00:57 crc kubenswrapper[4948]: I1122 05:00:57.370060 4948 generic.go:334] "Generic (PLEG): container finished" podID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerID="78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7" exitCode=0 Nov 22 05:00:57 crc kubenswrapper[4948]: I1122 05:00:57.370176 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tftj9" event={"ID":"831fb91c-781d-4248-8c8f-a23e2d3a846d","Type":"ContainerDied","Data":"78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7"} Nov 22 05:00:57 crc kubenswrapper[4948]: I1122 05:00:57.370355 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tftj9" event={"ID":"831fb91c-781d-4248-8c8f-a23e2d3a846d","Type":"ContainerStarted","Data":"4f524274ba722eed09ee10bb87505cf4378c48cf1ba77b4436c7097124dcb47e"} Nov 22 05:00:59 crc kubenswrapper[4948]: I1122 05:00:59.403580 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tftj9" event={"ID":"831fb91c-781d-4248-8c8f-a23e2d3a846d","Type":"ContainerStarted","Data":"9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa"} Nov 22 05:00:59 crc kubenswrapper[4948]: I1122 05:00:59.790514 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:00:59 crc kubenswrapper[4948]: I1122 05:00:59.790583 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:01:00 crc kubenswrapper[4948]: I1122 05:01:00.411009 4948 generic.go:334] "Generic (PLEG): container finished" podID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerID="9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa" exitCode=0 Nov 22 05:01:00 crc kubenswrapper[4948]: I1122 05:01:00.411048 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tftj9" event={"ID":"831fb91c-781d-4248-8c8f-a23e2d3a846d","Type":"ContainerDied","Data":"9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa"} Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.382326 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6bf7h"] Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.385219 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.403811 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6bf7h"] Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.423123 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-catalog-content\") pod \"certified-operators-6bf7h\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.423208 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8b77\" (UniqueName: \"kubernetes.io/projected/ddb4d646-4978-4eff-a7b4-6ef9feba2282-kube-api-access-l8b77\") pod \"certified-operators-6bf7h\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.423237 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-utilities\") pod \"certified-operators-6bf7h\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.524295 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8b77\" (UniqueName: \"kubernetes.io/projected/ddb4d646-4978-4eff-a7b4-6ef9feba2282-kube-api-access-l8b77\") pod \"certified-operators-6bf7h\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.524903 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-utilities\") pod \"certified-operators-6bf7h\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.525106 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-catalog-content\") pod \"certified-operators-6bf7h\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.525357 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-utilities\") pod \"certified-operators-6bf7h\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.525615 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-catalog-content\") pod \"certified-operators-6bf7h\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.548533 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-l8b77\" (UniqueName: \"kubernetes.io/projected/ddb4d646-4978-4eff-a7b4-6ef9feba2282-kube-api-access-l8b77\") pod \"certified-operators-6bf7h\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:10 crc kubenswrapper[4948]: I1122 05:01:10.728233 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:18 crc kubenswrapper[4948]: I1122 05:01:18.693388 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6bf7h"] Nov 22 05:01:19 crc kubenswrapper[4948]: W1122 05:01:19.441902 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podddb4d646_4978_4eff_a7b4_6ef9feba2282.slice/crio-6932b99019c42045c3960d897a303de28b2d7036ec7e7e7fca09d7fb24e0586e WatchSource:0}: Error finding container 6932b99019c42045c3960d897a303de28b2d7036ec7e7e7fca09d7fb24e0586e: Status 404 returned error can't find the container with id 6932b99019c42045c3960d897a303de28b2d7036ec7e7e7fca09d7fb24e0586e Nov 22 05:01:19 crc kubenswrapper[4948]: I1122 05:01:19.543982 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bf7h" event={"ID":"ddb4d646-4978-4eff-a7b4-6ef9feba2282","Type":"ContainerStarted","Data":"6932b99019c42045c3960d897a303de28b2d7036ec7e7e7fca09d7fb24e0586e"} Nov 22 05:01:20 crc kubenswrapper[4948]: I1122 05:01:20.553664 4948 generic.go:334] "Generic (PLEG): container finished" podID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerID="5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4" exitCode=0 Nov 22 05:01:20 crc kubenswrapper[4948]: I1122 05:01:20.553724 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bf7h" event={"ID":"ddb4d646-4978-4eff-a7b4-6ef9feba2282","Type":"ContainerDied","Data":"5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4"} Nov 22 05:01:20 crc kubenswrapper[4948]: I1122 05:01:20.556518 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tftj9" event={"ID":"831fb91c-781d-4248-8c8f-a23e2d3a846d","Type":"ContainerStarted","Data":"2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf"} Nov 22 05:01:20 crc kubenswrapper[4948]: I1122 05:01:20.613727 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tftj9" podStartSLOduration=22.812632617 podStartE2EDuration="41.613694342s" podCreationTimestamp="2025-11-22 05:00:39 +0000 UTC" firstStartedPulling="2025-11-22 05:00:57.371593776 +0000 UTC m=+860.057604292" lastFinishedPulling="2025-11-22 05:01:16.172655491 +0000 UTC m=+878.858666017" observedRunningTime="2025-11-22 05:01:20.608728035 +0000 UTC m=+883.294738571" watchObservedRunningTime="2025-11-22 05:01:20.613694342 +0000 UTC m=+883.299704918" Nov 22 05:01:21 crc kubenswrapper[4948]: E1122 05:01:21.482734 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" Nov 22 05:01:21 crc kubenswrapper[4948]: I1122 05:01:21.564985 4948 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" event={"ID":"f4b54cb5-1843-4f85-abbb-37274329a537","Type":"ContainerStarted","Data":"c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c"} Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.000416 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n52r8"] Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.001805 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.006511 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-catalog-content\") pod \"community-operators-n52r8\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.006584 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-utilities\") pod \"community-operators-n52r8\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.006650 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfddt\" (UniqueName: \"kubernetes.io/projected/7189bf26-aac4-4a1f-940b-0323faefa1cb-kube-api-access-jfddt\") pod \"community-operators-n52r8\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.015721 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n52r8"] Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.108099 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfddt\" (UniqueName: \"kubernetes.io/projected/7189bf26-aac4-4a1f-940b-0323faefa1cb-kube-api-access-jfddt\") pod \"community-operators-n52r8\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.108529 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-catalog-content\") pod \"community-operators-n52r8\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.108650 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-utilities\") pod \"community-operators-n52r8\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.109179 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-catalog-content\") pod \"community-operators-n52r8\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") 
" pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.109447 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-utilities\") pod \"community-operators-n52r8\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.132200 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfddt\" (UniqueName: \"kubernetes.io/projected/7189bf26-aac4-4a1f-940b-0323faefa1cb-kube-api-access-jfddt\") pod \"community-operators-n52r8\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.331170 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:22 crc kubenswrapper[4948]: I1122 05:01:22.739343 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n52r8"] Nov 22 05:01:22 crc kubenswrapper[4948]: W1122 05:01:22.757036 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7189bf26_aac4_4a1f_940b_0323faefa1cb.slice/crio-93d0d3fea1a6180d0ebb9d58becd12ed02ff412427764962578951411a363459 WatchSource:0}: Error finding container 93d0d3fea1a6180d0ebb9d58becd12ed02ff412427764962578951411a363459: Status 404 returned error can't find the container with id 93d0d3fea1a6180d0ebb9d58becd12ed02ff412427764962578951411a363459 Nov 22 05:01:23 crc kubenswrapper[4948]: I1122 05:01:23.581977 4948 generic.go:334] "Generic (PLEG): container finished" podID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerID="e8e4fa9e67754efbe14e088218442033c6744285e80679de02675d6131455e6e" exitCode=0 Nov 22 05:01:23 crc kubenswrapper[4948]: I1122 05:01:23.582375 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n52r8" event={"ID":"7189bf26-aac4-4a1f-940b-0323faefa1cb","Type":"ContainerDied","Data":"e8e4fa9e67754efbe14e088218442033c6744285e80679de02675d6131455e6e"} Nov 22 05:01:23 crc kubenswrapper[4948]: I1122 05:01:23.582414 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n52r8" event={"ID":"7189bf26-aac4-4a1f-940b-0323faefa1cb","Type":"ContainerStarted","Data":"93d0d3fea1a6180d0ebb9d58becd12ed02ff412427764962578951411a363459"} Nov 22 05:01:23 crc kubenswrapper[4948]: I1122 05:01:23.585809 4948 generic.go:334] "Generic (PLEG): container finished" podID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerID="0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66" exitCode=0 Nov 22 05:01:23 crc kubenswrapper[4948]: I1122 05:01:23.585884 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bf7h" event={"ID":"ddb4d646-4978-4eff-a7b4-6ef9feba2282","Type":"ContainerDied","Data":"0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66"} Nov 22 05:01:23 crc kubenswrapper[4948]: I1122 05:01:23.587609 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" 
event={"ID":"f4b54cb5-1843-4f85-abbb-37274329a537","Type":"ContainerStarted","Data":"bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb"} Nov 22 05:01:23 crc kubenswrapper[4948]: I1122 05:01:23.588195 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" Nov 22 05:01:23 crc kubenswrapper[4948]: I1122 05:01:23.641941 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" podStartSLOduration=2.617141313 podStartE2EDuration="50.64192256s" podCreationTimestamp="2025-11-22 05:00:33 +0000 UTC" firstStartedPulling="2025-11-22 05:00:34.734417342 +0000 UTC m=+837.420427858" lastFinishedPulling="2025-11-22 05:01:22.759198589 +0000 UTC m=+885.445209105" observedRunningTime="2025-11-22 05:01:23.639726049 +0000 UTC m=+886.325736565" watchObservedRunningTime="2025-11-22 05:01:23.64192256 +0000 UTC m=+886.327933076" Nov 22 05:01:24 crc kubenswrapper[4948]: I1122 05:01:24.595550 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bf7h" event={"ID":"ddb4d646-4978-4eff-a7b4-6ef9feba2282","Type":"ContainerStarted","Data":"23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581"} Nov 22 05:01:24 crc kubenswrapper[4948]: I1122 05:01:24.597781 4948 generic.go:334] "Generic (PLEG): container finished" podID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerID="33f8fc1834c2a6baf7f0ecdbe629504701b29bcb0ea3fd2a599a5dae2f48c9ff" exitCode=0 Nov 22 05:01:24 crc kubenswrapper[4948]: I1122 05:01:24.597828 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n52r8" event={"ID":"7189bf26-aac4-4a1f-940b-0323faefa1cb","Type":"ContainerDied","Data":"33f8fc1834c2a6baf7f0ecdbe629504701b29bcb0ea3fd2a599a5dae2f48c9ff"} Nov 22 05:01:24 crc kubenswrapper[4948]: I1122 05:01:24.623178 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6bf7h" podStartSLOduration=11.68911825 podStartE2EDuration="14.623154399s" podCreationTimestamp="2025-11-22 05:01:10 +0000 UTC" firstStartedPulling="2025-11-22 05:01:21.082918039 +0000 UTC m=+883.768928555" lastFinishedPulling="2025-11-22 05:01:24.016954148 +0000 UTC m=+886.702964704" observedRunningTime="2025-11-22 05:01:24.619332673 +0000 UTC m=+887.305343199" watchObservedRunningTime="2025-11-22 05:01:24.623154399 +0000 UTC m=+887.309164925" Nov 22 05:01:25 crc kubenswrapper[4948]: I1122 05:01:25.609806 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n52r8" event={"ID":"7189bf26-aac4-4a1f-940b-0323faefa1cb","Type":"ContainerStarted","Data":"f0c0866a31c468ad830bc4c4b6a396646d42f947ec035dccb290f79134429de0"} Nov 22 05:01:25 crc kubenswrapper[4948]: I1122 05:01:25.628689 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n52r8" podStartSLOduration=3.197724294 podStartE2EDuration="4.628669939s" podCreationTimestamp="2025-11-22 05:01:21 +0000 UTC" firstStartedPulling="2025-11-22 05:01:23.584603372 +0000 UTC m=+886.270613888" lastFinishedPulling="2025-11-22 05:01:25.015549017 +0000 UTC m=+887.701559533" observedRunningTime="2025-11-22 05:01:25.623445045 +0000 UTC m=+888.309455581" watchObservedRunningTime="2025-11-22 05:01:25.628669939 +0000 UTC m=+888.314680465" Nov 22 05:01:29 crc kubenswrapper[4948]: I1122 
05:01:29.569174 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tftj9" Nov 22 05:01:29 crc kubenswrapper[4948]: I1122 05:01:29.569770 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tftj9" Nov 22 05:01:29 crc kubenswrapper[4948]: I1122 05:01:29.633022 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tftj9" Nov 22 05:01:29 crc kubenswrapper[4948]: I1122 05:01:29.683114 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tftj9" Nov 22 05:01:29 crc kubenswrapper[4948]: I1122 05:01:29.790237 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:01:29 crc kubenswrapper[4948]: I1122 05:01:29.790540 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:01:29 crc kubenswrapper[4948]: I1122 05:01:29.790703 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 05:01:29 crc kubenswrapper[4948]: I1122 05:01:29.791445 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d1063d3de2076e619f45df2f69f8b545ea06fe47defd910d05a953ec8383f798"} pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 05:01:29 crc kubenswrapper[4948]: I1122 05:01:29.791628 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" containerID="cri-o://d1063d3de2076e619f45df2f69f8b545ea06fe47defd910d05a953ec8383f798" gracePeriod=600 Nov 22 05:01:30 crc kubenswrapper[4948]: I1122 05:01:30.197697 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tftj9"] Nov 22 05:01:30 crc kubenswrapper[4948]: I1122 05:01:30.641397 4948 generic.go:334] "Generic (PLEG): container finished" podID="126f010b-a640-4133-b63f-d2976da99215" containerID="d1063d3de2076e619f45df2f69f8b545ea06fe47defd910d05a953ec8383f798" exitCode=0 Nov 22 05:01:30 crc kubenswrapper[4948]: I1122 05:01:30.641499 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerDied","Data":"d1063d3de2076e619f45df2f69f8b545ea06fe47defd910d05a953ec8383f798"} Nov 22 05:01:30 crc kubenswrapper[4948]: I1122 05:01:30.641552 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" 
event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"82cd1f7ac46cb027948972361fb8a42fe8301bdb56d8d047033cf856f1c72a5c"} Nov 22 05:01:30 crc kubenswrapper[4948]: I1122 05:01:30.641575 4948 scope.go:117] "RemoveContainer" containerID="74c12ce3297a891692a52980f92e3b4d67bbfb18ea9a3348de4d0832ad26ef13" Nov 22 05:01:30 crc kubenswrapper[4948]: I1122 05:01:30.729309 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:30 crc kubenswrapper[4948]: I1122 05:01:30.729382 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:30 crc kubenswrapper[4948]: I1122 05:01:30.764899 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:31 crc kubenswrapper[4948]: I1122 05:01:31.650183 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tftj9" podUID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerName="registry-server" containerID="cri-o://2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf" gracePeriod=2 Nov 22 05:01:31 crc kubenswrapper[4948]: I1122 05:01:31.692287 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:31 crc kubenswrapper[4948]: I1122 05:01:31.956189 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tftj9" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.148830 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njffj\" (UniqueName: \"kubernetes.io/projected/831fb91c-781d-4248-8c8f-a23e2d3a846d-kube-api-access-njffj\") pod \"831fb91c-781d-4248-8c8f-a23e2d3a846d\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.148994 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-catalog-content\") pod \"831fb91c-781d-4248-8c8f-a23e2d3a846d\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.149035 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-utilities\") pod \"831fb91c-781d-4248-8c8f-a23e2d3a846d\" (UID: \"831fb91c-781d-4248-8c8f-a23e2d3a846d\") " Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.150441 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-utilities" (OuterVolumeSpecName: "utilities") pod "831fb91c-781d-4248-8c8f-a23e2d3a846d" (UID: "831fb91c-781d-4248-8c8f-a23e2d3a846d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.159677 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/831fb91c-781d-4248-8c8f-a23e2d3a846d-kube-api-access-njffj" (OuterVolumeSpecName: "kube-api-access-njffj") pod "831fb91c-781d-4248-8c8f-a23e2d3a846d" (UID: "831fb91c-781d-4248-8c8f-a23e2d3a846d"). 
InnerVolumeSpecName "kube-api-access-njffj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.251443 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njffj\" (UniqueName: \"kubernetes.io/projected/831fb91c-781d-4248-8c8f-a23e2d3a846d-kube-api-access-njffj\") on node \"crc\" DevicePath \"\"" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.251758 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.265299 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "831fb91c-781d-4248-8c8f-a23e2d3a846d" (UID: "831fb91c-781d-4248-8c8f-a23e2d3a846d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.332641 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.332727 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.353610 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/831fb91c-781d-4248-8c8f-a23e2d3a846d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.380249 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.663369 4948 generic.go:334] "Generic (PLEG): container finished" podID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerID="2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf" exitCode=0 Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.663423 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tftj9" event={"ID":"831fb91c-781d-4248-8c8f-a23e2d3a846d","Type":"ContainerDied","Data":"2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf"} Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.664994 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tftj9" event={"ID":"831fb91c-781d-4248-8c8f-a23e2d3a846d","Type":"ContainerDied","Data":"4f524274ba722eed09ee10bb87505cf4378c48cf1ba77b4436c7097124dcb47e"} Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.663529 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tftj9" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.665044 4948 scope.go:117] "RemoveContainer" containerID="2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.698496 4948 scope.go:117] "RemoveContainer" containerID="9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.741578 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tftj9"] Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.749023 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tftj9"] Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.752797 4948 scope.go:117] "RemoveContainer" containerID="78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.758094 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.785344 4948 scope.go:117] "RemoveContainer" containerID="2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf" Nov 22 05:01:32 crc kubenswrapper[4948]: E1122 05:01:32.786273 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf\": container with ID starting with 2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf not found: ID does not exist" containerID="2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.786335 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf"} err="failed to get container status \"2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf\": rpc error: code = NotFound desc = could not find container \"2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf\": container with ID starting with 2090258609d05c0f2662395bd50708e31955f226ed26698b8d401b294dfc8ebf not found: ID does not exist" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.786373 4948 scope.go:117] "RemoveContainer" containerID="9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa" Nov 22 05:01:32 crc kubenswrapper[4948]: E1122 05:01:32.786917 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa\": container with ID starting with 9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa not found: ID does not exist" containerID="9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.786974 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa"} err="failed to get container status \"9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa\": rpc error: code = NotFound desc = could not find container \"9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa\": container with ID starting with 
9abddc12a47477501dbe346ce14a262f477b409278b8d27fd126dc962b1a0eaa not found: ID does not exist" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.787015 4948 scope.go:117] "RemoveContainer" containerID="78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7" Nov 22 05:01:32 crc kubenswrapper[4948]: E1122 05:01:32.787413 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7\": container with ID starting with 78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7 not found: ID does not exist" containerID="78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.787457 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7"} err="failed to get container status \"78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7\": rpc error: code = NotFound desc = could not find container \"78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7\": container with ID starting with 78ec60e11ff56f84685abf2ba73c197ece0d98a17e3c20619e75d2bd81e601c7 not found: ID does not exist" Nov 22 05:01:32 crc kubenswrapper[4948]: I1122 05:01:32.999721 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6bf7h"] Nov 22 05:01:33 crc kubenswrapper[4948]: I1122 05:01:33.676024 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6bf7h" podUID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerName="registry-server" containerID="cri-o://23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581" gracePeriod=2 Nov 22 05:01:33 crc kubenswrapper[4948]: I1122 05:01:33.767013 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="831fb91c-781d-4248-8c8f-a23e2d3a846d" path="/var/lib/kubelet/pods/831fb91c-781d-4248-8c8f-a23e2d3a846d/volumes" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.058020 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.178112 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-utilities\") pod \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.178577 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-catalog-content\") pod \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.178807 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8b77\" (UniqueName: \"kubernetes.io/projected/ddb4d646-4978-4eff-a7b4-6ef9feba2282-kube-api-access-l8b77\") pod \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\" (UID: \"ddb4d646-4978-4eff-a7b4-6ef9feba2282\") " Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.179820 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-utilities" (OuterVolumeSpecName: "utilities") pod "ddb4d646-4978-4eff-a7b4-6ef9feba2282" (UID: "ddb4d646-4978-4eff-a7b4-6ef9feba2282"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.186369 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddb4d646-4978-4eff-a7b4-6ef9feba2282-kube-api-access-l8b77" (OuterVolumeSpecName: "kube-api-access-l8b77") pod "ddb4d646-4978-4eff-a7b4-6ef9feba2282" (UID: "ddb4d646-4978-4eff-a7b4-6ef9feba2282"). InnerVolumeSpecName "kube-api-access-l8b77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.278367 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.280442 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8b77\" (UniqueName: \"kubernetes.io/projected/ddb4d646-4978-4eff-a7b4-6ef9feba2282-kube-api-access-l8b77\") on node \"crc\" DevicePath \"\"" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.280934 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.684057 4948 generic.go:334] "Generic (PLEG): container finished" podID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerID="23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581" exitCode=0 Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.684207 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6bf7h" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.684204 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bf7h" event={"ID":"ddb4d646-4978-4eff-a7b4-6ef9feba2282","Type":"ContainerDied","Data":"23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581"} Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.684575 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bf7h" event={"ID":"ddb4d646-4978-4eff-a7b4-6ef9feba2282","Type":"ContainerDied","Data":"6932b99019c42045c3960d897a303de28b2d7036ec7e7e7fca09d7fb24e0586e"} Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.684613 4948 scope.go:117] "RemoveContainer" containerID="23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.704525 4948 scope.go:117] "RemoveContainer" containerID="0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.729132 4948 scope.go:117] "RemoveContainer" containerID="5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.757759 4948 scope.go:117] "RemoveContainer" containerID="23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581" Nov 22 05:01:34 crc kubenswrapper[4948]: E1122 05:01:34.758351 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581\": container with ID starting with 23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581 not found: ID does not exist" containerID="23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.758401 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581"} err="failed to get container status \"23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581\": rpc error: code = NotFound desc = could not find container \"23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581\": container with ID starting with 23749bc50a4f58a1dd8b0dce325c57ba1b4423ac140539760affcac9f8634581 not found: ID does not exist" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.758430 4948 scope.go:117] "RemoveContainer" containerID="0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66" Nov 22 05:01:34 crc kubenswrapper[4948]: E1122 05:01:34.758857 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66\": container with ID starting with 0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66 not found: ID does not exist" containerID="0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.758896 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66"} err="failed to get container status \"0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66\": rpc error: code = NotFound desc = could not find container 
\"0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66\": container with ID starting with 0baab0b0e4589f1c2b12dca817c3b0912b25ac60771de522925cd12f18d5cb66 not found: ID does not exist" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.758922 4948 scope.go:117] "RemoveContainer" containerID="5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4" Nov 22 05:01:34 crc kubenswrapper[4948]: E1122 05:01:34.759271 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4\": container with ID starting with 5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4 not found: ID does not exist" containerID="5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4" Nov 22 05:01:34 crc kubenswrapper[4948]: I1122 05:01:34.759343 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4"} err="failed to get container status \"5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4\": rpc error: code = NotFound desc = could not find container \"5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4\": container with ID starting with 5324c91f9a02c90c66393419c622a35cb26612ee0f19036addfa3a6025a550f4 not found: ID does not exist" Nov 22 05:01:35 crc kubenswrapper[4948]: I1122 05:01:35.402830 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n52r8"] Nov 22 05:01:35 crc kubenswrapper[4948]: I1122 05:01:35.689697 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n52r8" podUID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerName="registry-server" containerID="cri-o://f0c0866a31c468ad830bc4c4b6a396646d42f947ec035dccb290f79134429de0" gracePeriod=2 Nov 22 05:01:35 crc kubenswrapper[4948]: I1122 05:01:35.850579 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ddb4d646-4978-4eff-a7b4-6ef9feba2282" (UID: "ddb4d646-4978-4eff-a7b4-6ef9feba2282"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:01:35 crc kubenswrapper[4948]: I1122 05:01:35.903074 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddb4d646-4978-4eff-a7b4-6ef9feba2282-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:01:35 crc kubenswrapper[4948]: I1122 05:01:35.913806 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6bf7h"] Nov 22 05:01:35 crc kubenswrapper[4948]: I1122 05:01:35.917976 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6bf7h"] Nov 22 05:01:37 crc kubenswrapper[4948]: I1122 05:01:37.704682 4948 generic.go:334] "Generic (PLEG): container finished" podID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerID="f0c0866a31c468ad830bc4c4b6a396646d42f947ec035dccb290f79134429de0" exitCode=0 Nov 22 05:01:37 crc kubenswrapper[4948]: I1122 05:01:37.704763 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n52r8" event={"ID":"7189bf26-aac4-4a1f-940b-0323faefa1cb","Type":"ContainerDied","Data":"f0c0866a31c468ad830bc4c4b6a396646d42f947ec035dccb290f79134429de0"} Nov 22 05:01:37 crc kubenswrapper[4948]: I1122 05:01:37.768721 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" path="/var/lib/kubelet/pods/ddb4d646-4978-4eff-a7b4-6ef9feba2282/volumes" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.452599 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.540520 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-catalog-content\") pod \"7189bf26-aac4-4a1f-940b-0323faefa1cb\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.540695 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-utilities\") pod \"7189bf26-aac4-4a1f-940b-0323faefa1cb\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.540794 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfddt\" (UniqueName: \"kubernetes.io/projected/7189bf26-aac4-4a1f-940b-0323faefa1cb-kube-api-access-jfddt\") pod \"7189bf26-aac4-4a1f-940b-0323faefa1cb\" (UID: \"7189bf26-aac4-4a1f-940b-0323faefa1cb\") " Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.541482 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-utilities" (OuterVolumeSpecName: "utilities") pod "7189bf26-aac4-4a1f-940b-0323faefa1cb" (UID: "7189bf26-aac4-4a1f-940b-0323faefa1cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.551814 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7189bf26-aac4-4a1f-940b-0323faefa1cb-kube-api-access-jfddt" (OuterVolumeSpecName: "kube-api-access-jfddt") pod "7189bf26-aac4-4a1f-940b-0323faefa1cb" (UID: "7189bf26-aac4-4a1f-940b-0323faefa1cb"). 
InnerVolumeSpecName "kube-api-access-jfddt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.589122 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7189bf26-aac4-4a1f-940b-0323faefa1cb" (UID: "7189bf26-aac4-4a1f-940b-0323faefa1cb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.643070 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.643114 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7189bf26-aac4-4a1f-940b-0323faefa1cb-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.643149 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfddt\" (UniqueName: \"kubernetes.io/projected/7189bf26-aac4-4a1f-940b-0323faefa1cb-kube-api-access-jfddt\") on node \"crc\" DevicePath \"\"" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.716908 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n52r8" event={"ID":"7189bf26-aac4-4a1f-940b-0323faefa1cb","Type":"ContainerDied","Data":"93d0d3fea1a6180d0ebb9d58becd12ed02ff412427764962578951411a363459"} Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.716987 4948 scope.go:117] "RemoveContainer" containerID="f0c0866a31c468ad830bc4c4b6a396646d42f947ec035dccb290f79134429de0" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.717023 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n52r8" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.744680 4948 scope.go:117] "RemoveContainer" containerID="33f8fc1834c2a6baf7f0ecdbe629504701b29bcb0ea3fd2a599a5dae2f48c9ff" Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.761126 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n52r8"] Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.765304 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n52r8"] Nov 22 05:01:38 crc kubenswrapper[4948]: I1122 05:01:38.777716 4948 scope.go:117] "RemoveContainer" containerID="e8e4fa9e67754efbe14e088218442033c6744285e80679de02675d6131455e6e" Nov 22 05:01:39 crc kubenswrapper[4948]: I1122 05:01:39.769982 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7189bf26-aac4-4a1f-940b-0323faefa1cb" path="/var/lib/kubelet/pods/7189bf26-aac4-4a1f-940b-0323faefa1cb/volumes" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.801372 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-hmxsc"] Nov 22 05:01:41 crc kubenswrapper[4948]: E1122 05:01:41.801975 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerName="extract-content" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.801993 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerName="extract-content" Nov 22 05:01:41 crc kubenswrapper[4948]: E1122 05:01:41.802008 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerName="registry-server" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802017 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerName="registry-server" Nov 22 05:01:41 crc kubenswrapper[4948]: E1122 05:01:41.802028 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerName="extract-content" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802036 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerName="extract-content" Nov 22 05:01:41 crc kubenswrapper[4948]: E1122 05:01:41.802048 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerName="extract-content" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802056 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerName="extract-content" Nov 22 05:01:41 crc kubenswrapper[4948]: E1122 05:01:41.802070 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerName="registry-server" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802078 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerName="registry-server" Nov 22 05:01:41 crc kubenswrapper[4948]: E1122 05:01:41.802090 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerName="extract-utilities" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802100 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" 
containerName="extract-utilities" Nov 22 05:01:41 crc kubenswrapper[4948]: E1122 05:01:41.802115 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerName="extract-utilities" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802124 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerName="extract-utilities" Nov 22 05:01:41 crc kubenswrapper[4948]: E1122 05:01:41.802137 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerName="extract-utilities" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802145 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerName="extract-utilities" Nov 22 05:01:41 crc kubenswrapper[4948]: E1122 05:01:41.802155 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerName="registry-server" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802163 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerName="registry-server" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802322 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7189bf26-aac4-4a1f-940b-0323faefa1cb" containerName="registry-server" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802337 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="831fb91c-781d-4248-8c8f-a23e2d3a846d" containerName="registry-server" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802347 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddb4d646-4978-4eff-a7b4-6ef9feba2282" containerName="registry-server" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.802828 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.807186 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-index-dockercfg-j2jhh" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.813495 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-hmxsc"] Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.893531 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrqhc\" (UniqueName: \"kubernetes.io/projected/9a14e009-500d-4277-9fde-27db0ed6f943-kube-api-access-lrqhc\") pod \"infra-operator-index-hmxsc\" (UID: \"9a14e009-500d-4277-9fde-27db0ed6f943\") " pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:01:41 crc kubenswrapper[4948]: I1122 05:01:41.994805 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrqhc\" (UniqueName: \"kubernetes.io/projected/9a14e009-500d-4277-9fde-27db0ed6f943-kube-api-access-lrqhc\") pod \"infra-operator-index-hmxsc\" (UID: \"9a14e009-500d-4277-9fde-27db0ed6f943\") " pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:01:42 crc kubenswrapper[4948]: I1122 05:01:42.025562 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrqhc\" (UniqueName: \"kubernetes.io/projected/9a14e009-500d-4277-9fde-27db0ed6f943-kube-api-access-lrqhc\") pod \"infra-operator-index-hmxsc\" (UID: \"9a14e009-500d-4277-9fde-27db0ed6f943\") " pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:01:42 crc kubenswrapper[4948]: I1122 05:01:42.127001 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:01:42 crc kubenswrapper[4948]: I1122 05:01:42.369097 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-hmxsc"] Nov 22 05:01:42 crc kubenswrapper[4948]: I1122 05:01:42.743978 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-hmxsc" event={"ID":"9a14e009-500d-4277-9fde-27db0ed6f943","Type":"ContainerStarted","Data":"10ebe5a358082beee6b1b058e1a9940207d7c2c55923e98cfb2b0ee0c9b2aee6"} Nov 22 05:01:43 crc kubenswrapper[4948]: I1122 05:01:43.753792 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-hmxsc" event={"ID":"9a14e009-500d-4277-9fde-27db0ed6f943","Type":"ContainerStarted","Data":"bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3"} Nov 22 05:01:43 crc kubenswrapper[4948]: I1122 05:01:43.787662 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-hmxsc" podStartSLOduration=1.823197712 podStartE2EDuration="2.787640435s" podCreationTimestamp="2025-11-22 05:01:41 +0000 UTC" firstStartedPulling="2025-11-22 05:01:42.369699751 +0000 UTC m=+905.055710267" lastFinishedPulling="2025-11-22 05:01:43.334142474 +0000 UTC m=+906.020152990" observedRunningTime="2025-11-22 05:01:43.77264315 +0000 UTC m=+906.458653706" watchObservedRunningTime="2025-11-22 05:01:43.787640435 +0000 UTC m=+906.473650951" Nov 22 05:01:52 crc kubenswrapper[4948]: I1122 05:01:52.127566 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:01:52 crc kubenswrapper[4948]: I1122 05:01:52.128235 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:01:52 crc kubenswrapper[4948]: I1122 05:01:52.163195 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:01:52 crc kubenswrapper[4948]: I1122 05:01:52.877425 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.447442 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn"] Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.448856 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.454595 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-77z46" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.470376 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnvrd\" (UniqueName: \"kubernetes.io/projected/6de108e0-d060-402e-8ad0-f52f89e5f155-kube-api-access-jnvrd\") pod \"8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.470463 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-util\") pod \"8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.470525 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-bundle\") pod \"8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.473808 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn"] Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.572218 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-bundle\") pod \"8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.572534 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnvrd\" (UniqueName: \"kubernetes.io/projected/6de108e0-d060-402e-8ad0-f52f89e5f155-kube-api-access-jnvrd\") pod \"8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.572654 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-util\") pod \"8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.573587 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-util\") pod \"8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.574141 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-bundle\") pod \"8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.609555 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnvrd\" (UniqueName: \"kubernetes.io/projected/6de108e0-d060-402e-8ad0-f52f89e5f155-kube-api-access-jnvrd\") pod \"8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.772660 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:01:54 crc kubenswrapper[4948]: I1122 05:01:54.989494 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn"] Nov 22 05:01:55 crc kubenswrapper[4948]: I1122 05:01:55.850037 4948 generic.go:334] "Generic (PLEG): container finished" podID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerID="0e13ae764c94b80cc837dbca0c434f0e97ced9137585a03fc359daa35cd766fa" exitCode=0 Nov 22 05:01:55 crc kubenswrapper[4948]: I1122 05:01:55.850089 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" event={"ID":"6de108e0-d060-402e-8ad0-f52f89e5f155","Type":"ContainerDied","Data":"0e13ae764c94b80cc837dbca0c434f0e97ced9137585a03fc359daa35cd766fa"} Nov 22 05:01:55 crc kubenswrapper[4948]: I1122 05:01:55.850125 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" event={"ID":"6de108e0-d060-402e-8ad0-f52f89e5f155","Type":"ContainerStarted","Data":"edba59e29024788b3663080a8b67345652a3906c3ad352e1258d3d62bbf72f20"} Nov 22 05:01:57 crc kubenswrapper[4948]: I1122 05:01:57.863548 4948 generic.go:334] "Generic (PLEG): container finished" podID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerID="6098e827df5b4893e8a405ba14dc8cad649c37495a422494d31429fad77df783" exitCode=0 Nov 22 05:01:57 crc kubenswrapper[4948]: I1122 05:01:57.863603 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" event={"ID":"6de108e0-d060-402e-8ad0-f52f89e5f155","Type":"ContainerDied","Data":"6098e827df5b4893e8a405ba14dc8cad649c37495a422494d31429fad77df783"} Nov 22 05:01:58 crc kubenswrapper[4948]: I1122 05:01:58.879868 4948 generic.go:334] "Generic (PLEG): container finished" podID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerID="0e9091a8d798c540d74964a3bf68639ff34b916a2aff5bd2e7d453f3c380b82b" exitCode=0 Nov 22 05:01:58 crc kubenswrapper[4948]: I1122 05:01:58.879946 4948 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" event={"ID":"6de108e0-d060-402e-8ad0-f52f89e5f155","Type":"ContainerDied","Data":"0e9091a8d798c540d74964a3bf68639ff34b916a2aff5bd2e7d453f3c380b82b"} Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.203648 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.239112 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-bundle\") pod \"6de108e0-d060-402e-8ad0-f52f89e5f155\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.239158 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnvrd\" (UniqueName: \"kubernetes.io/projected/6de108e0-d060-402e-8ad0-f52f89e5f155-kube-api-access-jnvrd\") pod \"6de108e0-d060-402e-8ad0-f52f89e5f155\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.239191 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-util\") pod \"6de108e0-d060-402e-8ad0-f52f89e5f155\" (UID: \"6de108e0-d060-402e-8ad0-f52f89e5f155\") " Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.240777 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-bundle" (OuterVolumeSpecName: "bundle") pod "6de108e0-d060-402e-8ad0-f52f89e5f155" (UID: "6de108e0-d060-402e-8ad0-f52f89e5f155"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.247514 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6de108e0-d060-402e-8ad0-f52f89e5f155-kube-api-access-jnvrd" (OuterVolumeSpecName: "kube-api-access-jnvrd") pod "6de108e0-d060-402e-8ad0-f52f89e5f155" (UID: "6de108e0-d060-402e-8ad0-f52f89e5f155"). InnerVolumeSpecName "kube-api-access-jnvrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.255375 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-util" (OuterVolumeSpecName: "util") pod "6de108e0-d060-402e-8ad0-f52f89e5f155" (UID: "6de108e0-d060-402e-8ad0-f52f89e5f155"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.340376 4948 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.340459 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnvrd\" (UniqueName: \"kubernetes.io/projected/6de108e0-d060-402e-8ad0-f52f89e5f155-kube-api-access-jnvrd\") on node \"crc\" DevicePath \"\"" Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.340505 4948 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6de108e0-d060-402e-8ad0-f52f89e5f155-util\") on node \"crc\" DevicePath \"\"" Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.898899 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" event={"ID":"6de108e0-d060-402e-8ad0-f52f89e5f155","Type":"ContainerDied","Data":"edba59e29024788b3663080a8b67345652a3906c3ad352e1258d3d62bbf72f20"} Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.898958 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="edba59e29024788b3663080a8b67345652a3906c3ad352e1258d3d62bbf72f20" Nov 22 05:02:00 crc kubenswrapper[4948]: I1122 05:02:00.899071 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.777197 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92"] Nov 22 05:02:07 crc kubenswrapper[4948]: E1122 05:02:07.778350 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerName="util" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.778369 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerName="util" Nov 22 05:02:07 crc kubenswrapper[4948]: E1122 05:02:07.778384 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerName="extract" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.778392 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerName="extract" Nov 22 05:02:07 crc kubenswrapper[4948]: E1122 05:02:07.778407 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerName="pull" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.778414 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerName="pull" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.778575 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="6de108e0-d060-402e-8ad0-f52f89e5f155" containerName="extract" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.779270 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.781982 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-gs945" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.782294 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-service-cert" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.808255 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92"] Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.834972 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-apiservice-cert\") pod \"infra-operator-controller-manager-7c4897d696-rxn92\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.835022 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5q9v\" (UniqueName: \"kubernetes.io/projected/04e5f997-9e92-49d0-9bd2-8635681683cb-kube-api-access-r5q9v\") pod \"infra-operator-controller-manager-7c4897d696-rxn92\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.835166 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-webhook-cert\") pod \"infra-operator-controller-manager-7c4897d696-rxn92\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.935946 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-apiservice-cert\") pod \"infra-operator-controller-manager-7c4897d696-rxn92\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.936009 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5q9v\" (UniqueName: \"kubernetes.io/projected/04e5f997-9e92-49d0-9bd2-8635681683cb-kube-api-access-r5q9v\") pod \"infra-operator-controller-manager-7c4897d696-rxn92\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.936097 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-webhook-cert\") pod \"infra-operator-controller-manager-7c4897d696-rxn92\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.942421 4948 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-webhook-cert\") pod \"infra-operator-controller-manager-7c4897d696-rxn92\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.952847 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-apiservice-cert\") pod \"infra-operator-controller-manager-7c4897d696-rxn92\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:07 crc kubenswrapper[4948]: I1122 05:02:07.957836 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5q9v\" (UniqueName: \"kubernetes.io/projected/04e5f997-9e92-49d0-9bd2-8635681683cb-kube-api-access-r5q9v\") pod \"infra-operator-controller-manager-7c4897d696-rxn92\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:08 crc kubenswrapper[4948]: I1122 05:02:08.107102 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:08 crc kubenswrapper[4948]: I1122 05:02:08.561750 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92"] Nov 22 05:02:08 crc kubenswrapper[4948]: I1122 05:02:08.941520 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" event={"ID":"04e5f997-9e92-49d0-9bd2-8635681683cb","Type":"ContainerStarted","Data":"a926a3ac61e5f8164e38a96b945e05b7c607238158110b3bf7fedf059b9294d4"} Nov 22 05:02:10 crc kubenswrapper[4948]: I1122 05:02:10.952828 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" event={"ID":"04e5f997-9e92-49d0-9bd2-8635681683cb","Type":"ContainerStarted","Data":"facc8f6c0e0406f0753944f4d4dadff7e2665fdf9707e08eba9cc5251ac5c036"} Nov 22 05:02:10 crc kubenswrapper[4948]: I1122 05:02:10.953549 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" event={"ID":"04e5f997-9e92-49d0-9bd2-8635681683cb","Type":"ContainerStarted","Data":"5e9fc957d3e1d7e44ff5f79c185e1e76b71e4167883350cc7d86ddb3789c860a"} Nov 22 05:02:10 crc kubenswrapper[4948]: I1122 05:02:10.953579 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:10 crc kubenswrapper[4948]: I1122 05:02:10.975590 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" podStartSLOduration=2.338392025 podStartE2EDuration="3.975571895s" podCreationTimestamp="2025-11-22 05:02:07 +0000 UTC" firstStartedPulling="2025-11-22 05:02:08.569910997 +0000 UTC m=+931.255921513" lastFinishedPulling="2025-11-22 05:02:10.207090877 +0000 UTC m=+932.893101383" observedRunningTime="2025-11-22 05:02:10.970399109 +0000 UTC m=+933.656409625" watchObservedRunningTime="2025-11-22 05:02:10.975571895 +0000 UTC m=+933.661582411" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 
05:02:14.476979 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/openstack-galera-0"] Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.478408 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.482889 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"manila-kuttl-tests"/"kube-root-ca.crt" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.487412 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"manila-kuttl-tests"/"openstack-config-data" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.487799 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"manila-kuttl-tests"/"openshift-service-ca.crt" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.488597 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/openstack-galera-2"] Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.490272 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.491967 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/openstack-galera-1"] Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.492241 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"osp-secret" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.492972 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"manila-kuttl-tests"/"openstack-scripts" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.493050 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.493156 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"galera-openstack-dockercfg-5vrhp" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.507269 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/openstack-galera-0"] Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.511737 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/openstack-galera-2"] Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.522101 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/openstack-galera-1"] Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537342 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-default\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537401 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/9b8859c7-31db-4617-acb7-096f251ea3ae-secrets\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537440 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537505 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq8ck\" (UniqueName: \"kubernetes.io/projected/9b8859c7-31db-4617-acb7-096f251ea3ae-kube-api-access-tq8ck\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537540 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-operator-scripts\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537571 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-kolla-config\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537599 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-generated\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc 
kubenswrapper[4948]: I1122 05:02:14.537628 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-default\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537704 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/edc1757b-b350-47aa-94bd-a3f479b8d0ce-secrets\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537734 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kolla-config\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537764 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-operator-scripts\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537800 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-generated\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537837 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct8tc\" (UniqueName: \"kubernetes.io/projected/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kube-api-access-ct8tc\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.537887 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638676 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq8ck\" (UniqueName: \"kubernetes.io/projected/9b8859c7-31db-4617-acb7-096f251ea3ae-kube-api-access-tq8ck\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638730 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-operator-scripts\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc 
kubenswrapper[4948]: I1122 05:02:14.638761 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-kolla-config\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638784 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-generated\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638808 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-default\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638842 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/edc1757b-b350-47aa-94bd-a3f479b8d0ce-secrets\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638866 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kolla-config\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638884 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-operator-scripts\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638917 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-operator-scripts\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638942 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-generated\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638970 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62fpg\" (UniqueName: \"kubernetes.io/projected/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kube-api-access-62fpg\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.638993 4948 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-ct8tc\" (UniqueName: \"kubernetes.io/projected/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kube-api-access-ct8tc\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639041 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639063 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-generated\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639098 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kolla-config\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639123 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639149 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-default\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639173 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/9b8859c7-31db-4617-acb7-096f251ea3ae-secrets\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639195 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-secrets\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639223 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639248 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-default\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639739 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-generated\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.639908 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") device mount path \"/mnt/openstack/pv01\"" pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.640046 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-kolla-config\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.640431 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-generated\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.640477 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-operator-scripts\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.640869 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-default\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.641148 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-default\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.642638 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kolla-config\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.643006 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") device mount 
path \"/mnt/openstack/pv10\"" pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.643772 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-operator-scripts\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.648433 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/edc1757b-b350-47aa-94bd-a3f479b8d0ce-secrets\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.649792 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/9b8859c7-31db-4617-acb7-096f251ea3ae-secrets\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.659193 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.661478 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct8tc\" (UniqueName: \"kubernetes.io/projected/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kube-api-access-ct8tc\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.661647 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.662906 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq8ck\" (UniqueName: \"kubernetes.io/projected/9b8859c7-31db-4617-acb7-096f251ea3ae-kube-api-access-tq8ck\") pod \"openstack-galera-2\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.748027 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62fpg\" (UniqueName: \"kubernetes.io/projected/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kube-api-access-62fpg\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.748133 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-generated\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.748187 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kolla-config\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.748236 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.748296 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-secrets\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.748345 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-default\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.748432 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-operator-scripts\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.749185 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kolla-config\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.749390 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") device mount path \"/mnt/openstack/pv02\"" pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.749431 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-generated\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.749677 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-default\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.751504 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-operator-scripts\") pod \"openstack-galera-1\" (UID: 
\"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.753453 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-secrets\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.767255 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62fpg\" (UniqueName: \"kubernetes.io/projected/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kube-api-access-62fpg\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.771814 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.806786 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.823148 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:14 crc kubenswrapper[4948]: I1122 05:02:14.843060 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:15 crc kubenswrapper[4948]: I1122 05:02:15.098947 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/openstack-galera-1"] Nov 22 05:02:15 crc kubenswrapper[4948]: W1122 05:02:15.104272 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5a6d29f_cfab_405b_9b3f_d95e8a1bacb0.slice/crio-affcd4ee5d462d00d5151c04bd6b2ee25cc193d1a1e4b3f9c1431769246f98da WatchSource:0}: Error finding container affcd4ee5d462d00d5151c04bd6b2ee25cc193d1a1e4b3f9c1431769246f98da: Status 404 returned error can't find the container with id affcd4ee5d462d00d5151c04bd6b2ee25cc193d1a1e4b3f9c1431769246f98da Nov 22 05:02:15 crc kubenswrapper[4948]: I1122 05:02:15.250497 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/openstack-galera-2"] Nov 22 05:02:15 crc kubenswrapper[4948]: I1122 05:02:15.253632 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/openstack-galera-0"] Nov 22 05:02:15 crc kubenswrapper[4948]: W1122 05:02:15.256400 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b8859c7_31db_4617_acb7_096f251ea3ae.slice/crio-17fc7c80ae8732c48dbf9375d6fbd976e035f5b6bcef093ee86c2f3ffd899533 WatchSource:0}: Error finding container 17fc7c80ae8732c48dbf9375d6fbd976e035f5b6bcef093ee86c2f3ffd899533: Status 404 returned error can't find the container with id 17fc7c80ae8732c48dbf9375d6fbd976e035f5b6bcef093ee86c2f3ffd899533 Nov 22 05:02:15 crc kubenswrapper[4948]: W1122 05:02:15.259053 4948 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podedc1757b_b350_47aa_94bd_a3f479b8d0ce.slice/crio-1a1b7b694c3b4174f83dc51b00a2e61dd1e90dae1fb82b11281b9babc97d2011 WatchSource:0}: Error finding container 1a1b7b694c3b4174f83dc51b00a2e61dd1e90dae1fb82b11281b9babc97d2011: Status 404 returned error can't find the container with id 1a1b7b694c3b4174f83dc51b00a2e61dd1e90dae1fb82b11281b9babc97d2011 Nov 22 05:02:15 crc kubenswrapper[4948]: I1122 05:02:15.982726 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-2" event={"ID":"9b8859c7-31db-4617-acb7-096f251ea3ae","Type":"ContainerStarted","Data":"17fc7c80ae8732c48dbf9375d6fbd976e035f5b6bcef093ee86c2f3ffd899533"} Nov 22 05:02:15 crc kubenswrapper[4948]: I1122 05:02:15.984458 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-1" event={"ID":"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0","Type":"ContainerStarted","Data":"affcd4ee5d462d00d5151c04bd6b2ee25cc193d1a1e4b3f9c1431769246f98da"} Nov 22 05:02:15 crc kubenswrapper[4948]: I1122 05:02:15.986069 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-0" event={"ID":"edc1757b-b350-47aa-94bd-a3f479b8d0ce","Type":"ContainerStarted","Data":"1a1b7b694c3b4174f83dc51b00a2e61dd1e90dae1fb82b11281b9babc97d2011"} Nov 22 05:02:18 crc kubenswrapper[4948]: I1122 05:02:18.111050 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.185973 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/memcached-0"] Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.187534 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.189372 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"manila-kuttl-tests"/"memcached-config-data" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.189836 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"memcached-memcached-dockercfg-l6rj5" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.202849 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/memcached-0"] Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.365745 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kv6hh\" (UniqueName: \"kubernetes.io/projected/2605c37d-54b8-4424-8068-ff0350f44403-kube-api-access-kv6hh\") pod \"memcached-0\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.365815 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-kolla-config\") pod \"memcached-0\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.366485 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-config-data\") pod \"memcached-0\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.468020 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-config-data\") pod \"memcached-0\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.468131 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kv6hh\" (UniqueName: \"kubernetes.io/projected/2605c37d-54b8-4424-8068-ff0350f44403-kube-api-access-kv6hh\") pod \"memcached-0\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.468163 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-kolla-config\") pod \"memcached-0\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.468913 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-config-data\") pod \"memcached-0\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.468983 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-kolla-config\") pod \"memcached-0\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 
crc kubenswrapper[4948]: I1122 05:02:23.501264 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kv6hh\" (UniqueName: \"kubernetes.io/projected/2605c37d-54b8-4424-8068-ff0350f44403-kube-api-access-kv6hh\") pod \"memcached-0\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:23 crc kubenswrapper[4948]: I1122 05:02:23.569955 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:24 crc kubenswrapper[4948]: I1122 05:02:24.035057 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-0" event={"ID":"edc1757b-b350-47aa-94bd-a3f479b8d0ce","Type":"ContainerStarted","Data":"02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3"} Nov 22 05:02:24 crc kubenswrapper[4948]: I1122 05:02:24.054197 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-2" event={"ID":"9b8859c7-31db-4617-acb7-096f251ea3ae","Type":"ContainerStarted","Data":"0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb"} Nov 22 05:02:24 crc kubenswrapper[4948]: I1122 05:02:24.157364 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/memcached-0"] Nov 22 05:02:24 crc kubenswrapper[4948]: W1122 05:02:24.162103 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2605c37d_54b8_4424_8068_ff0350f44403.slice/crio-f4acaf537dd3fa6937e18f2af49d5a62aa81e38f7241d065138e8dabb59d657c WatchSource:0}: Error finding container f4acaf537dd3fa6937e18f2af49d5a62aa81e38f7241d065138e8dabb59d657c: Status 404 returned error can't find the container with id f4acaf537dd3fa6937e18f2af49d5a62aa81e38f7241d065138e8dabb59d657c Nov 22 05:02:25 crc kubenswrapper[4948]: I1122 05:02:25.073359 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/memcached-0" event={"ID":"2605c37d-54b8-4424-8068-ff0350f44403","Type":"ContainerStarted","Data":"f4acaf537dd3fa6937e18f2af49d5a62aa81e38f7241d065138e8dabb59d657c"} Nov 22 05:02:25 crc kubenswrapper[4948]: I1122 05:02:25.074512 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-1" event={"ID":"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0","Type":"ContainerStarted","Data":"ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51"} Nov 22 05:02:25 crc kubenswrapper[4948]: I1122 05:02:25.999662 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-pwxf6"] Nov 22 05:02:26 crc kubenswrapper[4948]: I1122 05:02:26.000565 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" Nov 22 05:02:26 crc kubenswrapper[4948]: I1122 05:02:26.002635 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-index-dockercfg-wwmg6" Nov 22 05:02:26 crc kubenswrapper[4948]: I1122 05:02:26.012159 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-pwxf6"] Nov 22 05:02:26 crc kubenswrapper[4948]: I1122 05:02:26.104125 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmrh2\" (UniqueName: \"kubernetes.io/projected/7740db48-038b-4961-9597-28784dcf9c2a-kube-api-access-jmrh2\") pod \"rabbitmq-cluster-operator-index-pwxf6\" (UID: \"7740db48-038b-4961-9597-28784dcf9c2a\") " pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" Nov 22 05:02:26 crc kubenswrapper[4948]: I1122 05:02:26.205822 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmrh2\" (UniqueName: \"kubernetes.io/projected/7740db48-038b-4961-9597-28784dcf9c2a-kube-api-access-jmrh2\") pod \"rabbitmq-cluster-operator-index-pwxf6\" (UID: \"7740db48-038b-4961-9597-28784dcf9c2a\") " pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" Nov 22 05:02:26 crc kubenswrapper[4948]: I1122 05:02:26.239229 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmrh2\" (UniqueName: \"kubernetes.io/projected/7740db48-038b-4961-9597-28784dcf9c2a-kube-api-access-jmrh2\") pod \"rabbitmq-cluster-operator-index-pwxf6\" (UID: \"7740db48-038b-4961-9597-28784dcf9c2a\") " pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" Nov 22 05:02:26 crc kubenswrapper[4948]: I1122 05:02:26.322973 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" Nov 22 05:02:26 crc kubenswrapper[4948]: I1122 05:02:26.796670 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-pwxf6"] Nov 22 05:02:27 crc kubenswrapper[4948]: I1122 05:02:27.093550 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" event={"ID":"7740db48-038b-4961-9597-28784dcf9c2a","Type":"ContainerStarted","Data":"6ad6fecfc4dc20b62d10c41e6ab63db118171f2209bea38b48c14d869c8674b1"} Nov 22 05:02:29 crc kubenswrapper[4948]: I1122 05:02:29.107224 4948 generic.go:334] "Generic (PLEG): container finished" podID="edc1757b-b350-47aa-94bd-a3f479b8d0ce" containerID="02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3" exitCode=0 Nov 22 05:02:29 crc kubenswrapper[4948]: I1122 05:02:29.107321 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-0" event={"ID":"edc1757b-b350-47aa-94bd-a3f479b8d0ce","Type":"ContainerDied","Data":"02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3"} Nov 22 05:02:29 crc kubenswrapper[4948]: I1122 05:02:29.110297 4948 generic.go:334] "Generic (PLEG): container finished" podID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerID="0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb" exitCode=0 Nov 22 05:02:29 crc kubenswrapper[4948]: I1122 05:02:29.110351 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-2" event={"ID":"9b8859c7-31db-4617-acb7-096f251ea3ae","Type":"ContainerDied","Data":"0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb"} Nov 22 05:02:29 crc kubenswrapper[4948]: I1122 05:02:29.112837 4948 generic.go:334] "Generic (PLEG): container finished" podID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" containerID="ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51" exitCode=0 Nov 22 05:02:29 crc kubenswrapper[4948]: I1122 05:02:29.112876 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-1" event={"ID":"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0","Type":"ContainerDied","Data":"ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51"} Nov 22 05:02:30 crc kubenswrapper[4948]: I1122 05:02:30.395295 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-pwxf6"] Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.010101 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-v94v7"] Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.011599 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.016504 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-v94v7"] Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.074106 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phknx\" (UniqueName: \"kubernetes.io/projected/13f8cc32-0ab5-4ad9-be73-8c7b7730983b-kube-api-access-phknx\") pod \"rabbitmq-cluster-operator-index-v94v7\" (UID: \"13f8cc32-0ab5-4ad9-be73-8c7b7730983b\") " pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.124576 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-0" event={"ID":"edc1757b-b350-47aa-94bd-a3f479b8d0ce","Type":"ContainerStarted","Data":"db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed"} Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.127830 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-2" event={"ID":"9b8859c7-31db-4617-acb7-096f251ea3ae","Type":"ContainerStarted","Data":"8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624"} Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.129780 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/memcached-0" event={"ID":"2605c37d-54b8-4424-8068-ff0350f44403","Type":"ContainerStarted","Data":"c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4"} Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.129867 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.131720 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-1" event={"ID":"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0","Type":"ContainerStarted","Data":"e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977"} Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.149073 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/openstack-galera-0" podStartSLOduration=9.593009044 podStartE2EDuration="18.149055741s" podCreationTimestamp="2025-11-22 05:02:13 +0000 UTC" firstStartedPulling="2025-11-22 05:02:15.263079418 +0000 UTC m=+937.949089934" lastFinishedPulling="2025-11-22 05:02:23.819126115 +0000 UTC m=+946.505136631" observedRunningTime="2025-11-22 05:02:31.145331176 +0000 UTC m=+953.831341692" watchObservedRunningTime="2025-11-22 05:02:31.149055741 +0000 UTC m=+953.835066257" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.161780 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/memcached-0" podStartSLOduration=1.4532211259999999 podStartE2EDuration="8.161762911s" podCreationTimestamp="2025-11-22 05:02:23 +0000 UTC" firstStartedPulling="2025-11-22 05:02:24.164390913 +0000 UTC m=+946.850401429" lastFinishedPulling="2025-11-22 05:02:30.872932698 +0000 UTC m=+953.558943214" observedRunningTime="2025-11-22 05:02:31.161310018 +0000 UTC m=+953.847320534" watchObservedRunningTime="2025-11-22 05:02:31.161762911 +0000 UTC m=+953.847773427" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.174950 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phknx\" 
(UniqueName: \"kubernetes.io/projected/13f8cc32-0ab5-4ad9-be73-8c7b7730983b-kube-api-access-phknx\") pod \"rabbitmq-cluster-operator-index-v94v7\" (UID: \"13f8cc32-0ab5-4ad9-be73-8c7b7730983b\") " pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.202619 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/openstack-galera-1" podStartSLOduration=9.413746527 podStartE2EDuration="18.202585874s" podCreationTimestamp="2025-11-22 05:02:13 +0000 UTC" firstStartedPulling="2025-11-22 05:02:15.110904957 +0000 UTC m=+937.796915473" lastFinishedPulling="2025-11-22 05:02:23.899744304 +0000 UTC m=+946.585754820" observedRunningTime="2025-11-22 05:02:31.198380615 +0000 UTC m=+953.884391121" watchObservedRunningTime="2025-11-22 05:02:31.202585874 +0000 UTC m=+953.888596390" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.203870 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/openstack-galera-2" podStartSLOduration=9.532336038 podStartE2EDuration="18.20386242s" podCreationTimestamp="2025-11-22 05:02:13 +0000 UTC" firstStartedPulling="2025-11-22 05:02:15.258753915 +0000 UTC m=+937.944764431" lastFinishedPulling="2025-11-22 05:02:23.930280297 +0000 UTC m=+946.616290813" observedRunningTime="2025-11-22 05:02:31.181412236 +0000 UTC m=+953.867422762" watchObservedRunningTime="2025-11-22 05:02:31.20386242 +0000 UTC m=+953.889872946" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.204760 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phknx\" (UniqueName: \"kubernetes.io/projected/13f8cc32-0ab5-4ad9-be73-8c7b7730983b-kube-api-access-phknx\") pod \"rabbitmq-cluster-operator-index-v94v7\" (UID: \"13f8cc32-0ab5-4ad9-be73-8c7b7730983b\") " pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.344380 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:02:31 crc kubenswrapper[4948]: I1122 05:02:31.820415 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-v94v7"] Nov 22 05:02:32 crc kubenswrapper[4948]: I1122 05:02:32.137535 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" event={"ID":"13f8cc32-0ab5-4ad9-be73-8c7b7730983b","Type":"ContainerStarted","Data":"e9e92002af933c52b554cd3c5a9744deaf6e9baef341e4e813513bcb4317a6a2"} Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.152291 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" event={"ID":"13f8cc32-0ab5-4ad9-be73-8c7b7730983b","Type":"ContainerStarted","Data":"87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70"} Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.155446 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" event={"ID":"7740db48-038b-4961-9597-28784dcf9c2a","Type":"ContainerStarted","Data":"a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa"} Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.155633 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" podUID="7740db48-038b-4961-9597-28784dcf9c2a" containerName="registry-server" containerID="cri-o://a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa" gracePeriod=2 Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.178623 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" podStartSLOduration=2.60432007 podStartE2EDuration="4.178600501s" podCreationTimestamp="2025-11-22 05:02:30 +0000 UTC" firstStartedPulling="2025-11-22 05:02:31.85490523 +0000 UTC m=+954.540915746" lastFinishedPulling="2025-11-22 05:02:33.429185661 +0000 UTC m=+956.115196177" observedRunningTime="2025-11-22 05:02:34.169786422 +0000 UTC m=+956.855796938" watchObservedRunningTime="2025-11-22 05:02:34.178600501 +0000 UTC m=+956.864611017" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.194620 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" podStartSLOduration=2.558033413 podStartE2EDuration="9.194588143s" podCreationTimestamp="2025-11-22 05:02:25 +0000 UTC" firstStartedPulling="2025-11-22 05:02:26.803168849 +0000 UTC m=+949.489179365" lastFinishedPulling="2025-11-22 05:02:33.439723579 +0000 UTC m=+956.125734095" observedRunningTime="2025-11-22 05:02:34.192325629 +0000 UTC m=+956.878336155" watchObservedRunningTime="2025-11-22 05:02:34.194588143 +0000 UTC m=+956.880598659" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.534596 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.627071 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmrh2\" (UniqueName: \"kubernetes.io/projected/7740db48-038b-4961-9597-28784dcf9c2a-kube-api-access-jmrh2\") pod \"7740db48-038b-4961-9597-28784dcf9c2a\" (UID: \"7740db48-038b-4961-9597-28784dcf9c2a\") " Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.632006 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7740db48-038b-4961-9597-28784dcf9c2a-kube-api-access-jmrh2" (OuterVolumeSpecName: "kube-api-access-jmrh2") pod "7740db48-038b-4961-9597-28784dcf9c2a" (UID: "7740db48-038b-4961-9597-28784dcf9c2a"). InnerVolumeSpecName "kube-api-access-jmrh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.728714 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmrh2\" (UniqueName: \"kubernetes.io/projected/7740db48-038b-4961-9597-28784dcf9c2a-kube-api-access-jmrh2\") on node \"crc\" DevicePath \"\"" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.807857 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.808100 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.824171 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.824219 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.843919 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:34 crc kubenswrapper[4948]: I1122 05:02:34.844098 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.169914 4948 generic.go:334] "Generic (PLEG): container finished" podID="7740db48-038b-4961-9597-28784dcf9c2a" containerID="a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa" exitCode=0 Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.169978 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" event={"ID":"7740db48-038b-4961-9597-28784dcf9c2a","Type":"ContainerDied","Data":"a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa"} Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.170041 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" event={"ID":"7740db48-038b-4961-9597-28784dcf9c2a","Type":"ContainerDied","Data":"6ad6fecfc4dc20b62d10c41e6ab63db118171f2209bea38b48c14d869c8674b1"} Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.170060 4948 scope.go:117] "RemoveContainer" containerID="a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa" Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.169990 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-pwxf6" Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.193880 4948 scope.go:117] "RemoveContainer" containerID="a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa" Nov 22 05:02:35 crc kubenswrapper[4948]: E1122 05:02:35.194397 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa\": container with ID starting with a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa not found: ID does not exist" containerID="a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa" Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.194560 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa"} err="failed to get container status \"a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa\": rpc error: code = NotFound desc = could not find container \"a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa\": container with ID starting with a99bf2cf3d29b73412d676c88d7fd65fad29173bbfd3b12ef0dc96d7cf32f4aa not found: ID does not exist" Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.202038 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-pwxf6"] Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.205807 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-pwxf6"] Nov 22 05:02:35 crc kubenswrapper[4948]: I1122 05:02:35.768172 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7740db48-038b-4961-9597-28784dcf9c2a" path="/var/lib/kubelet/pods/7740db48-038b-4961-9597-28784dcf9c2a/volumes" Nov 22 05:02:38 crc kubenswrapper[4948]: I1122 05:02:38.571587 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/memcached-0" Nov 22 05:02:38 crc kubenswrapper[4948]: I1122 05:02:38.888736 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:38 crc kubenswrapper[4948]: I1122 05:02:38.935027 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:02:41 crc kubenswrapper[4948]: I1122 05:02:41.344876 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:02:41 crc kubenswrapper[4948]: I1122 05:02:41.345276 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:02:41 crc kubenswrapper[4948]: I1122 05:02:41.397608 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:02:42 crc kubenswrapper[4948]: I1122 05:02:42.280889 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:02:43 crc kubenswrapper[4948]: I1122 05:02:43.849515 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558"] Nov 22 05:02:43 crc kubenswrapper[4948]: E1122 05:02:43.850431 4948 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7740db48-038b-4961-9597-28784dcf9c2a" containerName="registry-server" Nov 22 05:02:43 crc kubenswrapper[4948]: I1122 05:02:43.850501 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7740db48-038b-4961-9597-28784dcf9c2a" containerName="registry-server" Nov 22 05:02:43 crc kubenswrapper[4948]: I1122 05:02:43.850803 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7740db48-038b-4961-9597-28784dcf9c2a" containerName="registry-server" Nov 22 05:02:43 crc kubenswrapper[4948]: I1122 05:02:43.852775 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:43 crc kubenswrapper[4948]: I1122 05:02:43.854911 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558"] Nov 22 05:02:43 crc kubenswrapper[4948]: I1122 05:02:43.854934 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-77z46" Nov 22 05:02:43 crc kubenswrapper[4948]: I1122 05:02:43.993069 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfbvf\" (UniqueName: \"kubernetes.io/projected/82190887-0d93-4244-ae49-fcb691f23f0c-kube-api-access-lfbvf\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:43 crc kubenswrapper[4948]: I1122 05:02:43.993120 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:43 crc kubenswrapper[4948]: I1122 05:02:43.993166 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:44 crc kubenswrapper[4948]: I1122 05:02:44.094303 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfbvf\" (UniqueName: \"kubernetes.io/projected/82190887-0d93-4244-ae49-fcb691f23f0c-kube-api-access-lfbvf\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:44 crc kubenswrapper[4948]: I1122 05:02:44.094358 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:44 crc 
kubenswrapper[4948]: I1122 05:02:44.094423 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:44 crc kubenswrapper[4948]: I1122 05:02:44.094932 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:44 crc kubenswrapper[4948]: I1122 05:02:44.095247 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:44 crc kubenswrapper[4948]: I1122 05:02:44.122258 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfbvf\" (UniqueName: \"kubernetes.io/projected/82190887-0d93-4244-ae49-fcb691f23f0c-kube-api-access-lfbvf\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:44 crc kubenswrapper[4948]: I1122 05:02:44.167652 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:44 crc kubenswrapper[4948]: I1122 05:02:44.584479 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558"] Nov 22 05:02:44 crc kubenswrapper[4948]: I1122 05:02:44.874430 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="manila-kuttl-tests/openstack-galera-2" podUID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerName="galera" probeResult="failure" output=< Nov 22 05:02:44 crc kubenswrapper[4948]: wsrep_local_state_comment (Donor/Desynced) differs from Synced Nov 22 05:02:44 crc kubenswrapper[4948]: > Nov 22 05:02:45 crc kubenswrapper[4948]: I1122 05:02:45.244199 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" event={"ID":"82190887-0d93-4244-ae49-fcb691f23f0c","Type":"ContainerStarted","Data":"2b847934ac8744039eec07b3ba65c725d8c398039e28598694044c8263d31857"} Nov 22 05:02:53 crc kubenswrapper[4948]: I1122 05:02:53.301544 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" event={"ID":"82190887-0d93-4244-ae49-fcb691f23f0c","Type":"ContainerStarted","Data":"fd80e05a440781d462ad691a501ecc23bfcc03e89eba413f2be4af1f0b8ad470"} Nov 22 05:02:54 crc kubenswrapper[4948]: I1122 05:02:54.875353 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="manila-kuttl-tests/openstack-galera-2" podUID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerName="galera" probeResult="failure" output=< Nov 22 05:02:54 crc kubenswrapper[4948]: wsrep_local_state_comment (Donor/Desynced) differs from Synced Nov 22 05:02:54 crc kubenswrapper[4948]: > Nov 22 05:02:55 crc kubenswrapper[4948]: I1122 05:02:55.320151 4948 generic.go:334] "Generic (PLEG): container finished" podID="82190887-0d93-4244-ae49-fcb691f23f0c" containerID="fd80e05a440781d462ad691a501ecc23bfcc03e89eba413f2be4af1f0b8ad470" exitCode=0 Nov 22 05:02:55 crc kubenswrapper[4948]: I1122 05:02:55.320214 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" event={"ID":"82190887-0d93-4244-ae49-fcb691f23f0c","Type":"ContainerDied","Data":"fd80e05a440781d462ad691a501ecc23bfcc03e89eba413f2be4af1f0b8ad470"} Nov 22 05:02:56 crc kubenswrapper[4948]: I1122 05:02:56.329911 4948 generic.go:334] "Generic (PLEG): container finished" podID="82190887-0d93-4244-ae49-fcb691f23f0c" containerID="cb60cefb26d31097f53e3c849669eaf0a1d9a87d57effa58e3bc8c66e86f83b4" exitCode=0 Nov 22 05:02:56 crc kubenswrapper[4948]: I1122 05:02:56.329990 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" event={"ID":"82190887-0d93-4244-ae49-fcb691f23f0c","Type":"ContainerDied","Data":"cb60cefb26d31097f53e3c849669eaf0a1d9a87d57effa58e3bc8c66e86f83b4"} Nov 22 05:02:57 crc kubenswrapper[4948]: I1122 05:02:57.338021 4948 generic.go:334] "Generic (PLEG): container finished" podID="82190887-0d93-4244-ae49-fcb691f23f0c" containerID="5fc40c3eac6d4a3cb84579893c2ee0eb5849d8f3c9c087c53b2b8487aad10ec2" exitCode=0 Nov 22 05:02:57 crc kubenswrapper[4948]: I1122 05:02:57.338090 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" event={"ID":"82190887-0d93-4244-ae49-fcb691f23f0c","Type":"ContainerDied","Data":"5fc40c3eac6d4a3cb84579893c2ee0eb5849d8f3c9c087c53b2b8487aad10ec2"} Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.643678 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.725096 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfbvf\" (UniqueName: \"kubernetes.io/projected/82190887-0d93-4244-ae49-fcb691f23f0c-kube-api-access-lfbvf\") pod \"82190887-0d93-4244-ae49-fcb691f23f0c\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.725266 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-bundle\") pod \"82190887-0d93-4244-ae49-fcb691f23f0c\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.725327 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-util\") pod \"82190887-0d93-4244-ae49-fcb691f23f0c\" (UID: \"82190887-0d93-4244-ae49-fcb691f23f0c\") " Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.726066 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-bundle" (OuterVolumeSpecName: "bundle") pod "82190887-0d93-4244-ae49-fcb691f23f0c" (UID: "82190887-0d93-4244-ae49-fcb691f23f0c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.730017 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82190887-0d93-4244-ae49-fcb691f23f0c-kube-api-access-lfbvf" (OuterVolumeSpecName: "kube-api-access-lfbvf") pod "82190887-0d93-4244-ae49-fcb691f23f0c" (UID: "82190887-0d93-4244-ae49-fcb691f23f0c"). InnerVolumeSpecName "kube-api-access-lfbvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.802936 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-util" (OuterVolumeSpecName: "util") pod "82190887-0d93-4244-ae49-fcb691f23f0c" (UID: "82190887-0d93-4244-ae49-fcb691f23f0c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.827100 4948 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.827156 4948 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/82190887-0d93-4244-ae49-fcb691f23f0c-util\") on node \"crc\" DevicePath \"\"" Nov 22 05:02:58 crc kubenswrapper[4948]: I1122 05:02:58.827182 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfbvf\" (UniqueName: \"kubernetes.io/projected/82190887-0d93-4244-ae49-fcb691f23f0c-kube-api-access-lfbvf\") on node \"crc\" DevicePath \"\"" Nov 22 05:02:59 crc kubenswrapper[4948]: I1122 05:02:59.351269 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" event={"ID":"82190887-0d93-4244-ae49-fcb691f23f0c","Type":"ContainerDied","Data":"2b847934ac8744039eec07b3ba65c725d8c398039e28598694044c8263d31857"} Nov 22 05:02:59 crc kubenswrapper[4948]: I1122 05:02:59.351666 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b847934ac8744039eec07b3ba65c725d8c398039e28598694044c8263d31857" Nov 22 05:02:59 crc kubenswrapper[4948]: I1122 05:02:59.351805 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558" Nov 22 05:03:00 crc kubenswrapper[4948]: I1122 05:03:00.514069 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:03:00 crc kubenswrapper[4948]: I1122 05:03:00.563331 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:03:00 crc kubenswrapper[4948]: E1122 05:03:00.662783 4948 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.223:49666->38.102.83.223:45565: write tcp 38.102.83.223:49666->38.102.83.223:45565: write: broken pipe Nov 22 05:03:05 crc kubenswrapper[4948]: I1122 05:03:05.676875 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:03:05 crc kubenswrapper[4948]: I1122 05:03:05.732645 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.188629 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j"] Nov 22 05:03:10 crc kubenswrapper[4948]: E1122 05:03:10.189641 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82190887-0d93-4244-ae49-fcb691f23f0c" containerName="util" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.189668 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="82190887-0d93-4244-ae49-fcb691f23f0c" containerName="util" Nov 22 05:03:10 crc kubenswrapper[4948]: E1122 05:03:10.189685 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82190887-0d93-4244-ae49-fcb691f23f0c" containerName="extract" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.189696 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="82190887-0d93-4244-ae49-fcb691f23f0c" 
containerName="extract" Nov 22 05:03:10 crc kubenswrapper[4948]: E1122 05:03:10.189713 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82190887-0d93-4244-ae49-fcb691f23f0c" containerName="pull" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.189725 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="82190887-0d93-4244-ae49-fcb691f23f0c" containerName="pull" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.189937 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="82190887-0d93-4244-ae49-fcb691f23f0c" containerName="extract" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.190627 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.192105 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-dockercfg-9ngbw" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.197777 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j"] Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.210090 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55lpv\" (UniqueName: \"kubernetes.io/projected/38b67bbd-f7f5-44f4-9383-ac7ba57e4554-kube-api-access-55lpv\") pod \"rabbitmq-cluster-operator-779fc9694b-wcc9j\" (UID: \"38b67bbd-f7f5-44f4-9383-ac7ba57e4554\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.312880 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55lpv\" (UniqueName: \"kubernetes.io/projected/38b67bbd-f7f5-44f4-9383-ac7ba57e4554-kube-api-access-55lpv\") pod \"rabbitmq-cluster-operator-779fc9694b-wcc9j\" (UID: \"38b67bbd-f7f5-44f4-9383-ac7ba57e4554\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.335276 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55lpv\" (UniqueName: \"kubernetes.io/projected/38b67bbd-f7f5-44f4-9383-ac7ba57e4554-kube-api-access-55lpv\") pod \"rabbitmq-cluster-operator-779fc9694b-wcc9j\" (UID: \"38b67bbd-f7f5-44f4-9383-ac7ba57e4554\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.510065 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" Nov 22 05:03:10 crc kubenswrapper[4948]: I1122 05:03:10.949946 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j"] Nov 22 05:03:11 crc kubenswrapper[4948]: I1122 05:03:11.441250 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" event={"ID":"38b67bbd-f7f5-44f4-9383-ac7ba57e4554","Type":"ContainerStarted","Data":"7b494bcba36df8c20a18c6b6ab11c0cfcb7f8633cd572f189d38e1da1c880bb5"} Nov 22 05:03:17 crc kubenswrapper[4948]: I1122 05:03:17.481169 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" event={"ID":"38b67bbd-f7f5-44f4-9383-ac7ba57e4554","Type":"ContainerStarted","Data":"5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10"} Nov 22 05:03:17 crc kubenswrapper[4948]: I1122 05:03:17.499502 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" podStartSLOduration=2.27410382 podStartE2EDuration="7.499483024s" podCreationTimestamp="2025-11-22 05:03:10 +0000 UTC" firstStartedPulling="2025-11-22 05:03:10.958559868 +0000 UTC m=+993.644570394" lastFinishedPulling="2025-11-22 05:03:16.183939072 +0000 UTC m=+998.869949598" observedRunningTime="2025-11-22 05:03:17.497048003 +0000 UTC m=+1000.183058539" watchObservedRunningTime="2025-11-22 05:03:17.499483024 +0000 UTC m=+1000.185493560" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.250854 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/rabbitmq-server-0"] Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.252455 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/rabbitmq-server-0" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.257023 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"rabbitmq-default-user" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.257151 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"rabbitmq-erlang-cookie" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.257169 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"rabbitmq-server-dockercfg-lqtw4" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.257269 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"manila-kuttl-tests"/"rabbitmq-plugins-conf" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.257350 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"manila-kuttl-tests"/"rabbitmq-server-conf" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.270530 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/rabbitmq-server-0"] Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.289431 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.289574 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.289636 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.289694 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.289728 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0" Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.289761 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0" Nov 
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.289802 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.289877 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dlbn\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-kube-api-access-7dlbn\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.391002 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.391083 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.391122 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.391163 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.391187 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.391206 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.391235 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.391283 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dlbn\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-kube-api-access-7dlbn\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.394683 4948 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.394720 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1fa1b88928eb537ad214dfa936956d73eaf53eb7e89f9346e730df0c81f2e4f8/globalmount\"" pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.400489 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.401034 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.405305 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.412526 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.417331 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.419492 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.420766 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dlbn\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-kube-api-access-7dlbn\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.437363 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\") pod \"rabbitmq-server-0\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:21 crc kubenswrapper[4948]: I1122 05:03:21.574161 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:03:22 crc kubenswrapper[4948]: I1122 05:03:22.025653 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/rabbitmq-server-0"]
Nov 22 05:03:22 crc kubenswrapper[4948]: I1122 05:03:22.509937 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/rabbitmq-server-0" event={"ID":"8c58a5c1-3e51-491f-af14-9a795b1bdc3c","Type":"ContainerStarted","Data":"717f73d5205e78753ffb71cc1a1d840b016dab25426404b920eb47bfa60b7109"}
Nov 22 05:03:22 crc kubenswrapper[4948]: I1122 05:03:22.804504 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-index-hq6cx"]
Nov 22 05:03:22 crc kubenswrapper[4948]: I1122 05:03:22.805676 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-index-hq6cx"
Nov 22 05:03:22 crc kubenswrapper[4948]: I1122 05:03:22.811680 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-index-dockercfg-4vjwb"
Nov 22 05:03:22 crc kubenswrapper[4948]: I1122 05:03:22.821409 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-hq6cx"]
Nov 22 05:03:22 crc kubenswrapper[4948]: I1122 05:03:22.914390 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4lcd\" (UniqueName: \"kubernetes.io/projected/9af72c60-8ecb-4ffd-af6b-f17019153fb6-kube-api-access-j4lcd\") pod \"keystone-operator-index-hq6cx\" (UID: \"9af72c60-8ecb-4ffd-af6b-f17019153fb6\") " pod="openstack-operators/keystone-operator-index-hq6cx"
Nov 22 05:03:23 crc kubenswrapper[4948]: I1122 05:03:23.017757 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4lcd\" (UniqueName: \"kubernetes.io/projected/9af72c60-8ecb-4ffd-af6b-f17019153fb6-kube-api-access-j4lcd\") pod \"keystone-operator-index-hq6cx\" (UID: \"9af72c60-8ecb-4ffd-af6b-f17019153fb6\") " pod="openstack-operators/keystone-operator-index-hq6cx"
Nov 22 05:03:23 crc kubenswrapper[4948]: I1122 05:03:23.044730 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4lcd\" (UniqueName: \"kubernetes.io/projected/9af72c60-8ecb-4ffd-af6b-f17019153fb6-kube-api-access-j4lcd\") pod \"keystone-operator-index-hq6cx\" (UID: \"9af72c60-8ecb-4ffd-af6b-f17019153fb6\") " pod="openstack-operators/keystone-operator-index-hq6cx"
Nov 22 05:03:23 crc kubenswrapper[4948]: I1122 05:03:23.135029 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-index-hq6cx"
Nov 22 05:03:23 crc kubenswrapper[4948]: I1122 05:03:23.407929 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-hq6cx"]
Nov 22 05:03:23 crc kubenswrapper[4948]: I1122 05:03:23.517254 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-hq6cx" event={"ID":"9af72c60-8ecb-4ffd-af6b-f17019153fb6","Type":"ContainerStarted","Data":"3bbaca832c2c7203655f2b1c5d96cf47829cdd50ccae91d87420dd090255ed8b"}
Nov 22 05:03:28 crc kubenswrapper[4948]: I1122 05:03:28.548953 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-hq6cx" event={"ID":"9af72c60-8ecb-4ffd-af6b-f17019153fb6","Type":"ContainerStarted","Data":"8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e"}
Nov 22 05:03:28 crc kubenswrapper[4948]: I1122 05:03:28.563024 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-index-hq6cx" podStartSLOduration=1.7031924520000001 podStartE2EDuration="6.563008817s" podCreationTimestamp="2025-11-22 05:03:22 +0000 UTC" firstStartedPulling="2025-11-22 05:03:23.418092273 +0000 UTC m=+1006.104102789" lastFinishedPulling="2025-11-22 05:03:28.277908638 +0000 UTC m=+1010.963919154" observedRunningTime="2025-11-22 05:03:28.562657628 +0000 UTC m=+1011.248668154" watchObservedRunningTime="2025-11-22 05:03:28.563008817 +0000 UTC m=+1011.249019333"
Nov 22 05:03:29 crc kubenswrapper[4948]: I1122 05:03:29.558052 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/rabbitmq-server-0" event={"ID":"8c58a5c1-3e51-491f-af14-9a795b1bdc3c","Type":"ContainerStarted","Data":"8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4"}
Nov 22 05:03:33 crc kubenswrapper[4948]: I1122 05:03:33.135627 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/keystone-operator-index-hq6cx"
Nov 22 05:03:33 crc kubenswrapper[4948]: I1122 05:03:33.135991 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-index-hq6cx"
Nov 22 05:03:33 crc kubenswrapper[4948]: I1122 05:03:33.176939 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/keystone-operator-index-hq6cx"
Nov 22 05:03:33 crc kubenswrapper[4948]: I1122 05:03:33.619600 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-index-hq6cx"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.640886 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"]
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.643873 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
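The pod_startup_latency_tracker line above reports two durations. From the logged values, podStartSLOduration is the end-to-end startup time minus the image-pull window, and the keystone-operator-index-hq6cx numbers check out exactly (6.563008817 - 4.859816365 = 1.703192452). A small Go verification using timestamps copied from that entry; the formula is inferred from the values, not quoted from kubelet source:

```go
// Rough check of the startup-latency arithmetic:
//   SLO = (observedRunningTime - podCreationTimestamp)
//       - (lastFinishedPulling - firstStartedPulling)
package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	// Layout matching the "2025-11-22 05:03:22 +0000 UTC" form in the log.
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-22 05:03:22 +0000 UTC")
	firstPull := mustParse("2025-11-22 05:03:23.418092273 +0000 UTC")
	lastPull := mustParse("2025-11-22 05:03:28.277908638 +0000 UTC")
	running := mustParse("2025-11-22 05:03:28.563008817 +0000 UTC")

	e2e := running.Sub(created)          // podStartE2EDuration = 6.563008817s
	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration = 1.703192452s
	fmt.Println(e2e, slo)
}
```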
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.646953 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-77z46"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.652534 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"]
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.722847 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-util\") pod \"673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.722927 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-bundle\") pod \"673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.723033 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6msb7\" (UniqueName: \"kubernetes.io/projected/51be5375-cea2-4a66-a424-d850caad872c-kube-api-access-6msb7\") pod \"673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.824650 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-bundle\") pod \"673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.824725 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6msb7\" (UniqueName: \"kubernetes.io/projected/51be5375-cea2-4a66-a424-d850caad872c-kube-api-access-6msb7\") pod \"673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.824807 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-util\") pod \"673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.825283 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-bundle\") pod \"673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.825355 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-util\") pod \"673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.851435 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6msb7\" (UniqueName: \"kubernetes.io/projected/51be5375-cea2-4a66-a424-d850caad872c-kube-api-access-6msb7\") pod \"673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:43 crc kubenswrapper[4948]: I1122 05:03:37.973235 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"
Nov 22 05:03:44 crc kubenswrapper[4948]: W1122 05:03:44.131978 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51be5375_cea2_4a66_a424_d850caad872c.slice/crio-330984546f396dd82f4a88fc7d8805363c8cfa711fca8197b9a4c63953fe456e WatchSource:0}: Error finding container 330984546f396dd82f4a88fc7d8805363c8cfa711fca8197b9a4c63953fe456e: Status 404 returned error can't find the container with id 330984546f396dd82f4a88fc7d8805363c8cfa711fca8197b9a4c63953fe456e
Nov 22 05:03:44 crc kubenswrapper[4948]: I1122 05:03:44.139425 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"]
Nov 22 05:03:44 crc kubenswrapper[4948]: I1122 05:03:44.662751 4948 generic.go:334] "Generic (PLEG): container finished" podID="51be5375-cea2-4a66-a424-d850caad872c" containerID="1fa977130fa41ff8fc0923b1c393c05385e1c144fd647e13bcbb3cc67369fa39" exitCode=0
Nov 22 05:03:44 crc kubenswrapper[4948]: I1122 05:03:44.662799 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p" event={"ID":"51be5375-cea2-4a66-a424-d850caad872c","Type":"ContainerDied","Data":"1fa977130fa41ff8fc0923b1c393c05385e1c144fd647e13bcbb3cc67369fa39"}
Nov 22 05:03:44 crc kubenswrapper[4948]: I1122 05:03:44.662823 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p" event={"ID":"51be5375-cea2-4a66-a424-d850caad872c","Type":"ContainerStarted","Data":"330984546f396dd82f4a88fc7d8805363c8cfa711fca8197b9a4c63953fe456e"}
Nov 22 05:03:44 crc kubenswrapper[4948]: I1122 05:03:44.664325 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 22 05:03:45 crc kubenswrapper[4948]: I1122 05:03:45.670098 4948 generic.go:334] "Generic (PLEG): container finished" podID="51be5375-cea2-4a66-a424-d850caad872c" containerID="81b80e5ff0f4b9842b2f38e86477d8a89de33250f984699eac217c7493755414" exitCode=0
containerID="81b80e5ff0f4b9842b2f38e86477d8a89de33250f984699eac217c7493755414" exitCode=0 Nov 22 05:03:45 crc kubenswrapper[4948]: I1122 05:03:45.670202 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p" event={"ID":"51be5375-cea2-4a66-a424-d850caad872c","Type":"ContainerDied","Data":"81b80e5ff0f4b9842b2f38e86477d8a89de33250f984699eac217c7493755414"} Nov 22 05:03:46 crc kubenswrapper[4948]: I1122 05:03:46.679646 4948 generic.go:334] "Generic (PLEG): container finished" podID="51be5375-cea2-4a66-a424-d850caad872c" containerID="7bc741ac3b487b112e01588019322a1d6ec70fa313fd764d368a20779256e958" exitCode=0 Nov 22 05:03:46 crc kubenswrapper[4948]: I1122 05:03:46.679703 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p" event={"ID":"51be5375-cea2-4a66-a424-d850caad872c","Type":"ContainerDied","Data":"7bc741ac3b487b112e01588019322a1d6ec70fa313fd764d368a20779256e958"} Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.025874 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p" Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.179335 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-util\") pod \"51be5375-cea2-4a66-a424-d850caad872c\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.179583 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-bundle\") pod \"51be5375-cea2-4a66-a424-d850caad872c\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.179753 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6msb7\" (UniqueName: \"kubernetes.io/projected/51be5375-cea2-4a66-a424-d850caad872c-kube-api-access-6msb7\") pod \"51be5375-cea2-4a66-a424-d850caad872c\" (UID: \"51be5375-cea2-4a66-a424-d850caad872c\") " Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.180809 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-bundle" (OuterVolumeSpecName: "bundle") pod "51be5375-cea2-4a66-a424-d850caad872c" (UID: "51be5375-cea2-4a66-a424-d850caad872c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.189673 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51be5375-cea2-4a66-a424-d850caad872c-kube-api-access-6msb7" (OuterVolumeSpecName: "kube-api-access-6msb7") pod "51be5375-cea2-4a66-a424-d850caad872c" (UID: "51be5375-cea2-4a66-a424-d850caad872c"). InnerVolumeSpecName "kube-api-access-6msb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.219097 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-util" (OuterVolumeSpecName: "util") pod "51be5375-cea2-4a66-a424-d850caad872c" (UID: "51be5375-cea2-4a66-a424-d850caad872c"). 
InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.281811 4948 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.281857 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6msb7\" (UniqueName: \"kubernetes.io/projected/51be5375-cea2-4a66-a424-d850caad872c-kube-api-access-6msb7\") on node \"crc\" DevicePath \"\"" Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.281870 4948 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/51be5375-cea2-4a66-a424-d850caad872c-util\") on node \"crc\" DevicePath \"\"" Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.701436 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p" event={"ID":"51be5375-cea2-4a66-a424-d850caad872c","Type":"ContainerDied","Data":"330984546f396dd82f4a88fc7d8805363c8cfa711fca8197b9a4c63953fe456e"} Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.701799 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="330984546f396dd82f4a88fc7d8805363c8cfa711fca8197b9a4c63953fe456e" Nov 22 05:03:48 crc kubenswrapper[4948]: I1122 05:03:48.701695 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p" Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.821027 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"] Nov 22 05:03:54 crc kubenswrapper[4948]: E1122 05:03:54.821674 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51be5375-cea2-4a66-a424-d850caad872c" containerName="util" Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.821686 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="51be5375-cea2-4a66-a424-d850caad872c" containerName="util" Nov 22 05:03:54 crc kubenswrapper[4948]: E1122 05:03:54.821711 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51be5375-cea2-4a66-a424-d850caad872c" containerName="extract" Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.821718 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="51be5375-cea2-4a66-a424-d850caad872c" containerName="extract" Nov 22 05:03:54 crc kubenswrapper[4948]: E1122 05:03:54.821728 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51be5375-cea2-4a66-a424-d850caad872c" containerName="pull" Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.821734 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="51be5375-cea2-4a66-a424-d850caad872c" containerName="pull" Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.821845 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="51be5375-cea2-4a66-a424-d850caad872c" containerName="extract" Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.822443 4948 util.go:30] "No sandbox for pod can be found. 
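The UnmountVolume/TearDown/"Volume detached" sequence above is the kubelet's volume manager reconciling actual state against desired state: once the bundle-extract pod is gone, each volume still mounted but no longer desired gets an unmount operation, mirroring the MountVolume operations issued when the pod was added. A minimal sketch of that reconcile rule (illustrative only; the real logic lives in the kubelet's volumemanager package):

```go
// Compare desired state of world (DSW) with actual state of world (ASW)
// and issue whichever operation closes the gap.
package main

import "fmt"

func reconcile(desired, actual map[string]bool) {
	for vol := range desired {
		if !actual[vol] {
			fmt.Println("operationExecutor.MountVolume started for volume", vol)
		}
	}
	for vol := range actual {
		if !desired[vol] {
			fmt.Println("operationExecutor.UnmountVolume started for volume", vol)
		}
	}
}

func main() {
	desired := map[string]bool{} // pod deleted: nothing is desired anymore
	actual := map[string]bool{"util": true, "bundle": true, "kube-api-access-6msb7": true}
	reconcile(desired, actual) // emits one UnmountVolume per leftover mount
}
```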
Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.840899 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-service-cert"
Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.840983 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-qgxs6"
Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.845177 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"]
Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.986258 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-webhook-cert\") pod \"keystone-operator-controller-manager-5dfd7c9c5b-7b8x5\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.986579 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwcwt\" (UniqueName: \"kubernetes.io/projected/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-kube-api-access-jwcwt\") pod \"keystone-operator-controller-manager-5dfd7c9c5b-7b8x5\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:54 crc kubenswrapper[4948]: I1122 05:03:54.986672 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-apiservice-cert\") pod \"keystone-operator-controller-manager-5dfd7c9c5b-7b8x5\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:55 crc kubenswrapper[4948]: I1122 05:03:55.088334 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwcwt\" (UniqueName: \"kubernetes.io/projected/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-kube-api-access-jwcwt\") pod \"keystone-operator-controller-manager-5dfd7c9c5b-7b8x5\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:55 crc kubenswrapper[4948]: I1122 05:03:55.088923 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-apiservice-cert\") pod \"keystone-operator-controller-manager-5dfd7c9c5b-7b8x5\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:55 crc kubenswrapper[4948]: I1122 05:03:55.089100 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-webhook-cert\") pod \"keystone-operator-controller-manager-5dfd7c9c5b-7b8x5\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:55 crc kubenswrapper[4948]: I1122 05:03:55.096056 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-webhook-cert\") pod \"keystone-operator-controller-manager-5dfd7c9c5b-7b8x5\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:55 crc kubenswrapper[4948]: I1122 05:03:55.096145 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-apiservice-cert\") pod \"keystone-operator-controller-manager-5dfd7c9c5b-7b8x5\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:55 crc kubenswrapper[4948]: I1122 05:03:55.118100 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwcwt\" (UniqueName: \"kubernetes.io/projected/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-kube-api-access-jwcwt\") pod \"keystone-operator-controller-manager-5dfd7c9c5b-7b8x5\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:55 crc kubenswrapper[4948]: I1122 05:03:55.216432 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:55 crc kubenswrapper[4948]: I1122 05:03:55.630916 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"]
Nov 22 05:03:55 crc kubenswrapper[4948]: W1122 05:03:55.638122 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30513ce9_a925_49b0_b8d8_e9a1eb92bc11.slice/crio-075b31fa43676e064f91d733416feb0e17134e984fee60b24a8bcced8a93bb6e WatchSource:0}: Error finding container 075b31fa43676e064f91d733416feb0e17134e984fee60b24a8bcced8a93bb6e: Status 404 returned error can't find the container with id 075b31fa43676e064f91d733416feb0e17134e984fee60b24a8bcced8a93bb6e
Nov 22 05:03:55 crc kubenswrapper[4948]: I1122 05:03:55.744677 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" event={"ID":"30513ce9-a925-49b0-b8d8-e9a1eb92bc11","Type":"ContainerStarted","Data":"075b31fa43676e064f91d733416feb0e17134e984fee60b24a8bcced8a93bb6e"}
Nov 22 05:03:57 crc kubenswrapper[4948]: I1122 05:03:57.772252 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" event={"ID":"30513ce9-a925-49b0-b8d8-e9a1eb92bc11","Type":"ContainerStarted","Data":"e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316"}
Nov 22 05:03:58 crc kubenswrapper[4948]: I1122 05:03:58.771167 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" event={"ID":"30513ce9-a925-49b0-b8d8-e9a1eb92bc11","Type":"ContainerStarted","Data":"0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b"}
Nov 22 05:03:58 crc kubenswrapper[4948]: I1122 05:03:58.771795 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:03:58 crc kubenswrapper[4948]: I1122 05:03:58.810220 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" podStartSLOduration=3.069881464 podStartE2EDuration="4.810198855s" podCreationTimestamp="2025-11-22 05:03:54 +0000 UTC" firstStartedPulling="2025-11-22 05:03:55.640154493 +0000 UTC m=+1038.326165009" lastFinishedPulling="2025-11-22 05:03:57.380471884 +0000 UTC m=+1040.066482400" observedRunningTime="2025-11-22 05:03:58.807211431 +0000 UTC m=+1041.493221947" watchObservedRunningTime="2025-11-22 05:03:58.810198855 +0000 UTC m=+1041.496209371"
Nov 22 05:03:59 crc kubenswrapper[4948]: I1122 05:03:59.789515 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 05:03:59 crc kubenswrapper[4948]: I1122 05:03:59.789590 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 05:04:01 crc kubenswrapper[4948]: I1122 05:04:01.791187 4948 generic.go:334] "Generic (PLEG): container finished" podID="8c58a5c1-3e51-491f-af14-9a795b1bdc3c" containerID="8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4" exitCode=0
Nov 22 05:04:01 crc kubenswrapper[4948]: I1122 05:04:01.791271 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/rabbitmq-server-0" event={"ID":"8c58a5c1-3e51-491f-af14-9a795b1bdc3c","Type":"ContainerDied","Data":"8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4"}
Nov 22 05:04:02 crc kubenswrapper[4948]: I1122 05:04:02.806743 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/rabbitmq-server-0" event={"ID":"8c58a5c1-3e51-491f-af14-9a795b1bdc3c","Type":"ContainerStarted","Data":"6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472"}
Nov 22 05:04:02 crc kubenswrapper[4948]: I1122 05:04:02.807379 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:04:02 crc kubenswrapper[4948]: I1122 05:04:02.848977 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/rabbitmq-server-0" podStartSLOduration=36.838280058 podStartE2EDuration="42.848943729s" podCreationTimestamp="2025-11-22 05:03:20 +0000 UTC" firstStartedPulling="2025-11-22 05:03:22.037880418 +0000 UTC m=+1004.723890974" lastFinishedPulling="2025-11-22 05:03:28.048544119 +0000 UTC m=+1010.734554645" observedRunningTime="2025-11-22 05:04:02.840245023 +0000 UTC m=+1045.526255619" watchObservedRunningTime="2025-11-22 05:04:02.848943729 +0000 UTC m=+1045.534954315"
Nov 22 05:04:05 crc kubenswrapper[4948]: I1122 05:04:05.220702 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.059555 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/keystone-db-create-flbjx"]
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.061904 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-db-create-flbjx"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.070983 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-db-create-flbjx"]
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.106446 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g58wp\" (UniqueName: \"kubernetes.io/projected/d36b03e3-7b82-45ae-8ba7-59af108509b7-kube-api-access-g58wp\") pod \"keystone-db-create-flbjx\" (UID: \"d36b03e3-7b82-45ae-8ba7-59af108509b7\") " pod="manila-kuttl-tests/keystone-db-create-flbjx"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.208730 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g58wp\" (UniqueName: \"kubernetes.io/projected/d36b03e3-7b82-45ae-8ba7-59af108509b7-kube-api-access-g58wp\") pod \"keystone-db-create-flbjx\" (UID: \"d36b03e3-7b82-45ae-8ba7-59af108509b7\") " pod="manila-kuttl-tests/keystone-db-create-flbjx"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.234799 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g58wp\" (UniqueName: \"kubernetes.io/projected/d36b03e3-7b82-45ae-8ba7-59af108509b7-kube-api-access-g58wp\") pod \"keystone-db-create-flbjx\" (UID: \"d36b03e3-7b82-45ae-8ba7-59af108509b7\") " pod="manila-kuttl-tests/keystone-db-create-flbjx"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.337813 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/ceph"]
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.338885 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.343313 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"default-dockercfg-lqcnt"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.389192 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-db-create-flbjx"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.410935 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-log\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.410989 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rvcj\" (UniqueName: \"kubernetes.io/projected/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-kube-api-access-6rvcj\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.411197 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-run\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.411268 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-data\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.512421 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-run\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.512739 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-data\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.512810 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-log\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.512839 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rvcj\" (UniqueName: \"kubernetes.io/projected/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-kube-api-access-6rvcj\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.513501 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-run\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.513752 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-data\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.514825 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-log\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.533338 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rvcj\" (UniqueName: \"kubernetes.io/projected/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-kube-api-access-6rvcj\") pod \"ceph\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.672355 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/ceph"
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.789804 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-db-create-flbjx"]
Nov 22 05:04:10 crc kubenswrapper[4948]: W1122 05:04:10.795861 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd36b03e3_7b82_45ae_8ba7_59af108509b7.slice/crio-248a1c34d31aaaf491cd91848d8f55b93fe4693f2da10decd74383010c76f65e WatchSource:0}: Error finding container 248a1c34d31aaaf491cd91848d8f55b93fe4693f2da10decd74383010c76f65e: Status 404 returned error can't find the container with id 248a1c34d31aaaf491cd91848d8f55b93fe4693f2da10decd74383010c76f65e
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.867164 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-db-create-flbjx" event={"ID":"d36b03e3-7b82-45ae-8ba7-59af108509b7","Type":"ContainerStarted","Data":"248a1c34d31aaaf491cd91848d8f55b93fe4693f2da10decd74383010c76f65e"}
Nov 22 05:04:10 crc kubenswrapper[4948]: I1122 05:04:10.868361 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/ceph" event={"ID":"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754","Type":"ContainerStarted","Data":"25d198bf2b240f97245bc0550db9b795c308661ca2d99acf6a767c0deee1348d"}
Nov 22 05:04:11 crc kubenswrapper[4948]: I1122 05:04:11.578266 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/rabbitmq-server-0"
Nov 22 05:04:11 crc kubenswrapper[4948]: I1122 05:04:11.875330 4948 generic.go:334] "Generic (PLEG): container finished" podID="d36b03e3-7b82-45ae-8ba7-59af108509b7" containerID="69f492b59970a6586ed2c0f4211951c4c376c8b7a322a4a1371d08fc3f9bc6c7" exitCode=0
Nov 22 05:04:11 crc kubenswrapper[4948]: I1122 05:04:11.875377 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-db-create-flbjx" event={"ID":"d36b03e3-7b82-45ae-8ba7-59af108509b7","Type":"ContainerDied","Data":"69f492b59970a6586ed2c0f4211951c4c376c8b7a322a4a1371d08fc3f9bc6c7"}
Nov 22 05:04:15 crc kubenswrapper[4948]: I1122 05:04:15.406520 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-db-create-flbjx"
Nov 22 05:04:15 crc kubenswrapper[4948]: I1122 05:04:15.501202 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g58wp\" (UniqueName: \"kubernetes.io/projected/d36b03e3-7b82-45ae-8ba7-59af108509b7-kube-api-access-g58wp\") pod \"d36b03e3-7b82-45ae-8ba7-59af108509b7\" (UID: \"d36b03e3-7b82-45ae-8ba7-59af108509b7\") "
Nov 22 05:04:15 crc kubenswrapper[4948]: I1122 05:04:15.513458 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d36b03e3-7b82-45ae-8ba7-59af108509b7-kube-api-access-g58wp" (OuterVolumeSpecName: "kube-api-access-g58wp") pod "d36b03e3-7b82-45ae-8ba7-59af108509b7" (UID: "d36b03e3-7b82-45ae-8ba7-59af108509b7"). InnerVolumeSpecName "kube-api-access-g58wp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 05:04:15 crc kubenswrapper[4948]: I1122 05:04:15.602585 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g58wp\" (UniqueName: \"kubernetes.io/projected/d36b03e3-7b82-45ae-8ba7-59af108509b7-kube-api-access-g58wp\") on node \"crc\" DevicePath \"\""
Nov 22 05:04:15 crc kubenswrapper[4948]: I1122 05:04:15.904397 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-db-create-flbjx" event={"ID":"d36b03e3-7b82-45ae-8ba7-59af108509b7","Type":"ContainerDied","Data":"248a1c34d31aaaf491cd91848d8f55b93fe4693f2da10decd74383010c76f65e"}
Nov 22 05:04:15 crc kubenswrapper[4948]: I1122 05:04:15.904439 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="248a1c34d31aaaf491cd91848d8f55b93fe4693f2da10decd74383010c76f65e"
Nov 22 05:04:15 crc kubenswrapper[4948]: I1122 05:04:15.904485 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-db-create-flbjx"
Nov 22 05:04:29 crc kubenswrapper[4948]: I1122 05:04:29.789426 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 05:04:29 crc kubenswrapper[4948]: I1122 05:04:29.790191 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.002622 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/keystone-b2aa-account-create-g94z9"]
Nov 22 05:04:30 crc kubenswrapper[4948]: E1122 05:04:30.002972 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d36b03e3-7b82-45ae-8ba7-59af108509b7" containerName="mariadb-database-create"
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.002999 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d36b03e3-7b82-45ae-8ba7-59af108509b7" containerName="mariadb-database-create"
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.003183 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d36b03e3-7b82-45ae-8ba7-59af108509b7" containerName="mariadb-database-create"
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.003817 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9"
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.007681 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-db-secret"
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.012756 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-b2aa-account-create-g94z9"]
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.016211 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r599f\" (UniqueName: \"kubernetes.io/projected/68eb22ec-e675-4253-b5fa-61da8b4d5050-kube-api-access-r599f\") pod \"keystone-b2aa-account-create-g94z9\" (UID: \"68eb22ec-e675-4253-b5fa-61da8b4d5050\") " pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9"
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.117544 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r599f\" (UniqueName: \"kubernetes.io/projected/68eb22ec-e675-4253-b5fa-61da8b4d5050-kube-api-access-r599f\") pod \"keystone-b2aa-account-create-g94z9\" (UID: \"68eb22ec-e675-4253-b5fa-61da8b4d5050\") " pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9"
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.150014 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r599f\" (UniqueName: \"kubernetes.io/projected/68eb22ec-e675-4253-b5fa-61da8b4d5050-kube-api-access-r599f\") pod \"keystone-b2aa-account-create-g94z9\" (UID: \"68eb22ec-e675-4253-b5fa-61da8b4d5050\") " pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9"
Nov 22 05:04:30 crc kubenswrapper[4948]: I1122 05:04:30.352197 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9"
Nov 22 05:04:31 crc kubenswrapper[4948]: E1122 05:04:31.781101 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/ceph/demo:latest-squid"
Nov 22 05:04:31 crc kubenswrapper[4948]: E1122 05:04:31.781730 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceph,Image:quay.io/ceph/demo:latest-squid,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:MON_IP,Value:192.168.126.11,ValueFrom:nil,},EnvVar{Name:CEPH_DAEMON,Value:demo,ValueFrom:nil,},EnvVar{Name:CEPH_PUBLIC_NETWORK,Value:0.0.0.0/0,ValueFrom:nil,},EnvVar{Name:DEMO_DAEMONS,Value:osd,mds,rgw,ValueFrom:nil,},EnvVar{Name:CEPH_DEMO_UID,Value:0,ValueFrom:nil,},EnvVar{Name:RGW_NAME,Value:ceph,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:data,ReadOnly:false,MountPath:/var/lib/ceph,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log,ReadOnly:false,MountPath:/var/log/ceph,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run,ReadOnly:false,MountPath:/run/ceph,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6rvcj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceph_manila-kuttl-tests(ba3d7d7d-eba9-4f3c-8259-9ec7aab02754): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 22 05:04:31 crc kubenswrapper[4948]: E1122 05:04:31.783302 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceph\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="manila-kuttl-tests/ceph" podUID="ba3d7d7d-eba9-4f3c-8259-9ec7aab02754"
Nov 22 05:04:32 crc kubenswrapper[4948]: E1122 05:04:32.016895 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceph\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/ceph/demo:latest-squid\\\"\"" pod="manila-kuttl-tests/ceph" podUID="ba3d7d7d-eba9-4f3c-8259-9ec7aab02754"
Nov 22 05:04:32 crc kubenswrapper[4948]: I1122 05:04:32.080542 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-b2aa-account-create-g94z9"]
Nov 22 05:04:33 crc kubenswrapper[4948]: I1122 05:04:33.022870 4948 generic.go:334] "Generic (PLEG): container finished" podID="68eb22ec-e675-4253-b5fa-61da8b4d5050" containerID="de7c2ad9bf7f134a72420b8ddf1d510937659832a4e061d216875174d2864188" exitCode=0
Nov 22 05:04:33 crc kubenswrapper[4948]: I1122 05:04:33.022978 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9" event={"ID":"68eb22ec-e675-4253-b5fa-61da8b4d5050","Type":"ContainerDied","Data":"de7c2ad9bf7f134a72420b8ddf1d510937659832a4e061d216875174d2864188"}
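The ceph sequence above shows a single failed pull ("context canceled") surfacing first as ErrImagePull and then, on the next sync, as ImagePullBackOff; the kubelet keeps retrying with a growing delay, which is why the ceph container only starts at 05:04:49, once a later pull succeeds. A Go sketch of such a back-off schedule; the 10s-doubling-to-5m numbers match how kubelet back-off is commonly described, but treat them as an assumption rather than a value read from this log:

```go
// Illustrative image-pull back-off schedule: double from a 10s base,
// capped at 5 minutes.
package main

import (
	"fmt"
	"time"
)

func backoff(attempt int) time.Duration {
	d := 10 * time.Second << uint(attempt) // 10s, 20s, 40s, ...
	if max := 5 * time.Minute; d > max {
		d = max
	}
	return d
}

func main() {
	for attempt := 0; attempt < 7; attempt++ {
		fmt.Printf("pull attempt %d failed; ImagePullBackOff for %v\n",
			attempt+1, backoff(attempt))
	}
}
```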
event={"ID":"68eb22ec-e675-4253-b5fa-61da8b4d5050","Type":"ContainerDied","Data":"de7c2ad9bf7f134a72420b8ddf1d510937659832a4e061d216875174d2864188"} Nov 22 05:04:33 crc kubenswrapper[4948]: I1122 05:04:33.023227 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9" event={"ID":"68eb22ec-e675-4253-b5fa-61da8b4d5050","Type":"ContainerStarted","Data":"038d23acad6af26d01743432e0880fa8f80841d26144cb7a0cd4777976274610"} Nov 22 05:04:34 crc kubenswrapper[4948]: I1122 05:04:34.288400 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9" Nov 22 05:04:34 crc kubenswrapper[4948]: I1122 05:04:34.477551 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r599f\" (UniqueName: \"kubernetes.io/projected/68eb22ec-e675-4253-b5fa-61da8b4d5050-kube-api-access-r599f\") pod \"68eb22ec-e675-4253-b5fa-61da8b4d5050\" (UID: \"68eb22ec-e675-4253-b5fa-61da8b4d5050\") " Nov 22 05:04:34 crc kubenswrapper[4948]: I1122 05:04:34.482700 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68eb22ec-e675-4253-b5fa-61da8b4d5050-kube-api-access-r599f" (OuterVolumeSpecName: "kube-api-access-r599f") pod "68eb22ec-e675-4253-b5fa-61da8b4d5050" (UID: "68eb22ec-e675-4253-b5fa-61da8b4d5050"). InnerVolumeSpecName "kube-api-access-r599f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:04:34 crc kubenswrapper[4948]: I1122 05:04:34.580155 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r599f\" (UniqueName: \"kubernetes.io/projected/68eb22ec-e675-4253-b5fa-61da8b4d5050-kube-api-access-r599f\") on node \"crc\" DevicePath \"\"" Nov 22 05:04:35 crc kubenswrapper[4948]: I1122 05:04:35.040700 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9" event={"ID":"68eb22ec-e675-4253-b5fa-61da8b4d5050","Type":"ContainerDied","Data":"038d23acad6af26d01743432e0880fa8f80841d26144cb7a0cd4777976274610"} Nov 22 05:04:35 crc kubenswrapper[4948]: I1122 05:04:35.040748 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="038d23acad6af26d01743432e0880fa8f80841d26144cb7a0cd4777976274610" Nov 22 05:04:35 crc kubenswrapper[4948]: I1122 05:04:35.040760 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-b2aa-account-create-g94z9" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.577824 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/keystone-db-sync-ngm5n"] Nov 22 05:04:40 crc kubenswrapper[4948]: E1122 05:04:40.578726 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68eb22ec-e675-4253-b5fa-61da8b4d5050" containerName="mariadb-account-create" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.578753 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="68eb22ec-e675-4253-b5fa-61da8b4d5050" containerName="mariadb-account-create" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.578960 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="68eb22ec-e675-4253-b5fa-61da8b4d5050" containerName="mariadb-account-create" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.579580 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/keystone-db-sync-ngm5n" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.582158 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-scripts" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.582349 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.582739 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-config-data" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.582829 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-keystone-dockercfg-jtg5n" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.587253 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-db-sync-ngm5n"] Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.665951 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-config-data\") pod \"keystone-db-sync-ngm5n\" (UID: \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\") " pod="manila-kuttl-tests/keystone-db-sync-ngm5n" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.666029 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2kfm\" (UniqueName: \"kubernetes.io/projected/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-kube-api-access-b2kfm\") pod \"keystone-db-sync-ngm5n\" (UID: \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\") " pod="manila-kuttl-tests/keystone-db-sync-ngm5n" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.772700 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-config-data\") pod \"keystone-db-sync-ngm5n\" (UID: \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\") " pod="manila-kuttl-tests/keystone-db-sync-ngm5n" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.772819 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2kfm\" (UniqueName: \"kubernetes.io/projected/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-kube-api-access-b2kfm\") pod \"keystone-db-sync-ngm5n\" (UID: \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\") " pod="manila-kuttl-tests/keystone-db-sync-ngm5n" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.785180 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-config-data\") pod \"keystone-db-sync-ngm5n\" (UID: \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\") " pod="manila-kuttl-tests/keystone-db-sync-ngm5n" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.789139 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2kfm\" (UniqueName: \"kubernetes.io/projected/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-kube-api-access-b2kfm\") pod \"keystone-db-sync-ngm5n\" (UID: \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\") " pod="manila-kuttl-tests/keystone-db-sync-ngm5n" Nov 22 05:04:40 crc kubenswrapper[4948]: I1122 05:04:40.908299 4948 util.go:30] "No sandbox for pod can be found. 
Nov 22 05:04:41 crc kubenswrapper[4948]: I1122 05:04:41.321084 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-db-sync-ngm5n"]
Nov 22 05:04:42 crc kubenswrapper[4948]: I1122 05:04:42.085964 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-db-sync-ngm5n" event={"ID":"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4","Type":"ContainerStarted","Data":"fa138ccc9cea695486057272698ffe8b23044a632588a8a2b08232c56c56d076"}
Nov 22 05:04:49 crc kubenswrapper[4948]: I1122 05:04:49.135406 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/ceph" event={"ID":"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754","Type":"ContainerStarted","Data":"91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba"}
Nov 22 05:04:49 crc kubenswrapper[4948]: I1122 05:04:49.137722 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-db-sync-ngm5n" event={"ID":"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4","Type":"ContainerStarted","Data":"f55ff0d0e7d49582d342dd8159a7f4cd59f36a614451579f2daa83a99d775c2c"}
Nov 22 05:04:49 crc kubenswrapper[4948]: I1122 05:04:49.150851 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/ceph" podStartSLOduration=1.3681106490000001 podStartE2EDuration="39.150830836s" podCreationTimestamp="2025-11-22 05:04:10 +0000 UTC" firstStartedPulling="2025-11-22 05:04:10.696348178 +0000 UTC m=+1053.382358694" lastFinishedPulling="2025-11-22 05:04:48.479068365 +0000 UTC m=+1091.165078881" observedRunningTime="2025-11-22 05:04:49.148824799 +0000 UTC m=+1091.834835315" watchObservedRunningTime="2025-11-22 05:04:49.150830836 +0000 UTC m=+1091.836841352"
Nov 22 05:04:49 crc kubenswrapper[4948]: I1122 05:04:49.166414 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/keystone-db-sync-ngm5n" podStartSLOduration=2.010823866 podStartE2EDuration="9.166392426s" podCreationTimestamp="2025-11-22 05:04:40 +0000 UTC" firstStartedPulling="2025-11-22 05:04:41.339528249 +0000 UTC m=+1084.025538765" lastFinishedPulling="2025-11-22 05:04:48.495096809 +0000 UTC m=+1091.181107325" observedRunningTime="2025-11-22 05:04:49.164664197 +0000 UTC m=+1091.850674713" watchObservedRunningTime="2025-11-22 05:04:49.166392426 +0000 UTC m=+1091.852402952"
Nov 22 05:04:49 crc kubenswrapper[4948]: E1122 05:04:49.904598 4948 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.223:53744->38.102.83.223:45565: write tcp 38.102.83.223:53744->38.102.83.223:45565: write: broken pipe
Nov 22 05:04:59 crc kubenswrapper[4948]: I1122 05:04:59.790278 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 05:04:59 crc kubenswrapper[4948]: I1122 05:04:59.790922 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 05:04:59 crc kubenswrapper[4948]: I1122 05:04:59.790974 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx"
Nov 22 05:04:59 crc kubenswrapper[4948]: I1122 05:04:59.791527 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"82cd1f7ac46cb027948972361fb8a42fe8301bdb56d8d047033cf856f1c72a5c"} pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 05:04:59 crc kubenswrapper[4948]: I1122 05:04:59.791585 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" containerID="cri-o://82cd1f7ac46cb027948972361fb8a42fe8301bdb56d8d047033cf856f1c72a5c" gracePeriod=600
Nov 22 05:05:01 crc kubenswrapper[4948]: I1122 05:05:01.237959 4948 generic.go:334] "Generic (PLEG): container finished" podID="126f010b-a640-4133-b63f-d2976da99215" containerID="82cd1f7ac46cb027948972361fb8a42fe8301bdb56d8d047033cf856f1c72a5c" exitCode=0
Nov 22 05:05:01 crc kubenswrapper[4948]: I1122 05:05:01.238042 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerDied","Data":"82cd1f7ac46cb027948972361fb8a42fe8301bdb56d8d047033cf856f1c72a5c"}
Nov 22 05:05:01 crc kubenswrapper[4948]: I1122 05:05:01.238450 4948 scope.go:117] "RemoveContainer" containerID="d1063d3de2076e619f45df2f69f8b545ea06fe47defd910d05a953ec8383f798"
Nov 22 05:05:05 crc kubenswrapper[4948]: E1122 05:05:05.908590 4948 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.223:57922->38.102.83.223:45565: write tcp 38.102.83.223:57922->38.102.83.223:45565: write: connection reset by peer
Nov 22 05:05:15 crc kubenswrapper[4948]: E1122 05:05:15.818723 4948 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.223:54576->38.102.83.223:45565: write tcp 38.102.83.223:54576->38.102.83.223:45565: write: broken pipe
Nov 22 05:05:18 crc kubenswrapper[4948]: I1122 05:05:18.391512 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"a0943f4e445f12f6dc5d4e849eee67dd81bfc6359d81ae42402f37fb54747939"}
Nov 22 05:05:35 crc kubenswrapper[4948]: E1122 05:05:35.683617 4948 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.223:57252->38.102.83.223:45565: write tcp 38.102.83.223:57252->38.102.83.223:45565: write: broken pipe
Nov 22 05:05:37 crc kubenswrapper[4948]: I1122 05:05:37.540592 4948 generic.go:334] "Generic (PLEG): container finished" podID="3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4" containerID="f55ff0d0e7d49582d342dd8159a7f4cd59f36a614451579f2daa83a99d775c2c" exitCode=0
Nov 22 05:05:37 crc kubenswrapper[4948]: I1122 05:05:37.540660 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-db-sync-ngm5n" event={"ID":"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4","Type":"ContainerDied","Data":"f55ff0d0e7d49582d342dd8159a7f4cd59f36a614451579f2daa83a99d775c2c"}
Nov 22 05:05:38 crc kubenswrapper[4948]: I1122 05:05:38.872966 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-db-sync-ngm5n"
Need to start a new one" pod="manila-kuttl-tests/keystone-db-sync-ngm5n" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.062799 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2kfm\" (UniqueName: \"kubernetes.io/projected/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-kube-api-access-b2kfm\") pod \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\" (UID: \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\") " Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.063101 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-config-data\") pod \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\" (UID: \"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4\") " Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.075666 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-kube-api-access-b2kfm" (OuterVolumeSpecName: "kube-api-access-b2kfm") pod "3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4" (UID: "3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4"). InnerVolumeSpecName "kube-api-access-b2kfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.098909 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-config-data" (OuterVolumeSpecName: "config-data") pod "3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4" (UID: "3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.164873 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.164917 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2kfm\" (UniqueName: \"kubernetes.io/projected/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4-kube-api-access-b2kfm\") on node \"crc\" DevicePath \"\"" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.555741 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-db-sync-ngm5n" event={"ID":"3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4","Type":"ContainerDied","Data":"fa138ccc9cea695486057272698ffe8b23044a632588a8a2b08232c56c56d076"} Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.555822 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa138ccc9cea695486057272698ffe8b23044a632588a8a2b08232c56c56d076" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.555836 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/keystone-db-sync-ngm5n" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.781915 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/keystone-bootstrap-68cnx"] Nov 22 05:05:39 crc kubenswrapper[4948]: E1122 05:05:39.782270 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4" containerName="keystone-db-sync" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.782292 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4" containerName="keystone-db-sync" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.782438 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4" containerName="keystone-db-sync" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.783040 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.785947 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-keystone-dockercfg-jtg5n" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.786214 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-config-data" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.788410 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-scripts" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.788665 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.790119 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-bootstrap-68cnx"] Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.977837 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-credential-keys\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.978139 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-scripts\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.978161 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-fernet-keys\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 05:05:39.979656 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-config-data\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:39 crc kubenswrapper[4948]: I1122 
05:05:39.979807 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6grww\" (UniqueName: \"kubernetes.io/projected/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-kube-api-access-6grww\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.081051 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-credential-keys\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.081335 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-scripts\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.081490 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-fernet-keys\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.081665 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-config-data\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.081811 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6grww\" (UniqueName: \"kubernetes.io/projected/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-kube-api-access-6grww\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.089102 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-scripts\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.089168 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-config-data\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.092302 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-credential-keys\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.093062 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-fernet-keys\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.104136 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6grww\" (UniqueName: \"kubernetes.io/projected/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-kube-api-access-6grww\") pod \"keystone-bootstrap-68cnx\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.110567 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.514901 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-bootstrap-68cnx"] Nov 22 05:05:40 crc kubenswrapper[4948]: I1122 05:05:40.565684 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-bootstrap-68cnx" event={"ID":"6c11a057-0fc1-4b03-83e3-f8be2decfcc5","Type":"ContainerStarted","Data":"e41c93530d5d5f8ac91a06e421791622ae628f2fc3d1f421373f1a530b54a18f"} Nov 22 05:05:41 crc kubenswrapper[4948]: I1122 05:05:41.581633 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-bootstrap-68cnx" event={"ID":"6c11a057-0fc1-4b03-83e3-f8be2decfcc5","Type":"ContainerStarted","Data":"dc3a0c45e74648a639cdd91acdb2c2134f423b4c9dc127925ba3a516fb454544"} Nov 22 05:05:41 crc kubenswrapper[4948]: I1122 05:05:41.603111 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/keystone-bootstrap-68cnx" podStartSLOduration=2.603092663 podStartE2EDuration="2.603092663s" podCreationTimestamp="2025-11-22 05:05:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:05:41.599151681 +0000 UTC m=+1144.285162197" watchObservedRunningTime="2025-11-22 05:05:41.603092663 +0000 UTC m=+1144.289103179" Nov 22 05:05:43 crc kubenswrapper[4948]: I1122 05:05:43.599227 4948 generic.go:334] "Generic (PLEG): container finished" podID="6c11a057-0fc1-4b03-83e3-f8be2decfcc5" containerID="dc3a0c45e74648a639cdd91acdb2c2134f423b4c9dc127925ba3a516fb454544" exitCode=0 Nov 22 05:05:43 crc kubenswrapper[4948]: I1122 05:05:43.599359 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-bootstrap-68cnx" event={"ID":"6c11a057-0fc1-4b03-83e3-f8be2decfcc5","Type":"ContainerDied","Data":"dc3a0c45e74648a639cdd91acdb2c2134f423b4c9dc127925ba3a516fb454544"} Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.871227 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.960083 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-scripts\") pod \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.960387 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-credential-keys\") pod \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.960552 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6grww\" (UniqueName: \"kubernetes.io/projected/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-kube-api-access-6grww\") pod \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.960712 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-fernet-keys\") pod \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.960816 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-config-data\") pod \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\" (UID: \"6c11a057-0fc1-4b03-83e3-f8be2decfcc5\") " Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.965505 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-kube-api-access-6grww" (OuterVolumeSpecName: "kube-api-access-6grww") pod "6c11a057-0fc1-4b03-83e3-f8be2decfcc5" (UID: "6c11a057-0fc1-4b03-83e3-f8be2decfcc5"). InnerVolumeSpecName "kube-api-access-6grww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.967186 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6c11a057-0fc1-4b03-83e3-f8be2decfcc5" (UID: "6c11a057-0fc1-4b03-83e3-f8be2decfcc5"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.968816 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-scripts" (OuterVolumeSpecName: "scripts") pod "6c11a057-0fc1-4b03-83e3-f8be2decfcc5" (UID: "6c11a057-0fc1-4b03-83e3-f8be2decfcc5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.971568 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6c11a057-0fc1-4b03-83e3-f8be2decfcc5" (UID: "6c11a057-0fc1-4b03-83e3-f8be2decfcc5"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:05:44 crc kubenswrapper[4948]: I1122 05:05:44.978787 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-config-data" (OuterVolumeSpecName: "config-data") pod "6c11a057-0fc1-4b03-83e3-f8be2decfcc5" (UID: "6c11a057-0fc1-4b03-83e3-f8be2decfcc5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.063182 4948 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.063227 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.063240 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.063251 4948 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.063265 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6grww\" (UniqueName: \"kubernetes.io/projected/6c11a057-0fc1-4b03-83e3-f8be2decfcc5-kube-api-access-6grww\") on node \"crc\" DevicePath \"\"" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.614260 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-bootstrap-68cnx" event={"ID":"6c11a057-0fc1-4b03-83e3-f8be2decfcc5","Type":"ContainerDied","Data":"e41c93530d5d5f8ac91a06e421791622ae628f2fc3d1f421373f1a530b54a18f"} Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.614642 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e41c93530d5d5f8ac91a06e421791622ae628f2fc3d1f421373f1a530b54a18f" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.614711 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-bootstrap-68cnx" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.682074 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/keystone-66bcbf78b6-ndxxc"] Nov 22 05:05:45 crc kubenswrapper[4948]: E1122 05:05:45.682364 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c11a057-0fc1-4b03-83e3-f8be2decfcc5" containerName="keystone-bootstrap" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.682383 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c11a057-0fc1-4b03-83e3-f8be2decfcc5" containerName="keystone-bootstrap" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.682561 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c11a057-0fc1-4b03-83e3-f8be2decfcc5" containerName="keystone-bootstrap" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.683091 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.685732 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.685773 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-keystone-dockercfg-jtg5n" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.685947 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-config-data" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.686671 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"keystone-scripts" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.692306 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-66bcbf78b6-ndxxc"] Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.773484 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svlzv\" (UniqueName: \"kubernetes.io/projected/271e3d90-f82c-4001-8df0-acf407e4743a-kube-api-access-svlzv\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.773547 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-scripts\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.773674 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-config-data\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.773734 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-fernet-keys\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.773757 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-credential-keys\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.874744 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-fernet-keys\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.874790 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-credential-keys\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.874844 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svlzv\" (UniqueName: \"kubernetes.io/projected/271e3d90-f82c-4001-8df0-acf407e4743a-kube-api-access-svlzv\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.874887 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-scripts\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.874932 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-config-data\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.878313 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-scripts\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.878502 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-fernet-keys\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.878538 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-config-data\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.879165 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-credential-keys\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.889511 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svlzv\" (UniqueName: \"kubernetes.io/projected/271e3d90-f82c-4001-8df0-acf407e4743a-kube-api-access-svlzv\") pod \"keystone-66bcbf78b6-ndxxc\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:45 crc kubenswrapper[4948]: I1122 05:05:45.998737 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:46 crc kubenswrapper[4948]: I1122 05:05:46.187424 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystone-66bcbf78b6-ndxxc"] Nov 22 05:05:46 crc kubenswrapper[4948]: I1122 05:05:46.622794 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" event={"ID":"271e3d90-f82c-4001-8df0-acf407e4743a","Type":"ContainerStarted","Data":"e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa"} Nov 22 05:05:46 crc kubenswrapper[4948]: I1122 05:05:46.623139 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:05:46 crc kubenswrapper[4948]: I1122 05:05:46.623153 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" event={"ID":"271e3d90-f82c-4001-8df0-acf407e4743a","Type":"ContainerStarted","Data":"591c4320f471cb5b49ddb2425de1aa3c7b669555a68102b99cce6fb8dfeecbca"} Nov 22 05:05:46 crc kubenswrapper[4948]: I1122 05:05:46.666814 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" podStartSLOduration=1.666789143 podStartE2EDuration="1.666789143s" podCreationTimestamp="2025-11-22 05:05:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:05:46.661746849 +0000 UTC m=+1149.347757365" watchObservedRunningTime="2025-11-22 05:05:46.666789143 +0000 UTC m=+1149.352799659" Nov 22 05:05:51 crc kubenswrapper[4948]: E1122 05:05:51.484507 4948 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.223:60936->38.102.83.223:45565: write tcp 38.102.83.223:60936->38.102.83.223:45565: write: connection reset by peer Nov 22 05:06:17 crc kubenswrapper[4948]: I1122 05:06:17.441886 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:06:24 crc kubenswrapper[4948]: I1122 05:06:24.465817 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-index-fxd8c"] Nov 22 05:06:24 crc kubenswrapper[4948]: I1122 05:06:24.467093 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:06:24 crc kubenswrapper[4948]: I1122 05:06:24.476559 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-index-dockercfg-tqqxg" Nov 22 05:06:24 crc kubenswrapper[4948]: I1122 05:06:24.478577 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-index-fxd8c"] Nov 22 05:06:24 crc kubenswrapper[4948]: I1122 05:06:24.511624 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffxks\" (UniqueName: \"kubernetes.io/projected/45443cdd-98a8-4fc6-bc0e-d621318951ad-kube-api-access-ffxks\") pod \"manila-operator-index-fxd8c\" (UID: \"45443cdd-98a8-4fc6-bc0e-d621318951ad\") " pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:06:24 crc kubenswrapper[4948]: I1122 05:06:24.613394 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffxks\" (UniqueName: \"kubernetes.io/projected/45443cdd-98a8-4fc6-bc0e-d621318951ad-kube-api-access-ffxks\") pod \"manila-operator-index-fxd8c\" (UID: \"45443cdd-98a8-4fc6-bc0e-d621318951ad\") " pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:06:24 crc kubenswrapper[4948]: I1122 05:06:24.638015 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffxks\" (UniqueName: \"kubernetes.io/projected/45443cdd-98a8-4fc6-bc0e-d621318951ad-kube-api-access-ffxks\") pod \"manila-operator-index-fxd8c\" (UID: \"45443cdd-98a8-4fc6-bc0e-d621318951ad\") " pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:06:24 crc kubenswrapper[4948]: I1122 05:06:24.793636 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:06:25 crc kubenswrapper[4948]: I1122 05:06:25.257829 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-index-fxd8c"] Nov 22 05:06:25 crc kubenswrapper[4948]: W1122 05:06:25.262091 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45443cdd_98a8_4fc6_bc0e_d621318951ad.slice/crio-ad296a989570c830bfe5e5f1012e4a496dfec6b7122398b43bde968e2fefca05 WatchSource:0}: Error finding container ad296a989570c830bfe5e5f1012e4a496dfec6b7122398b43bde968e2fefca05: Status 404 returned error can't find the container with id ad296a989570c830bfe5e5f1012e4a496dfec6b7122398b43bde968e2fefca05 Nov 22 05:06:25 crc kubenswrapper[4948]: I1122 05:06:25.907561 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-index-fxd8c" event={"ID":"45443cdd-98a8-4fc6-bc0e-d621318951ad","Type":"ContainerStarted","Data":"ad296a989570c830bfe5e5f1012e4a496dfec6b7122398b43bde968e2fefca05"} Nov 22 05:06:27 crc kubenswrapper[4948]: I1122 05:06:27.931830 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-index-fxd8c" event={"ID":"45443cdd-98a8-4fc6-bc0e-d621318951ad","Type":"ContainerStarted","Data":"c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354"} Nov 22 05:06:30 crc kubenswrapper[4948]: I1122 05:06:30.030593 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-index-fxd8c" podStartSLOduration=3.613252735 podStartE2EDuration="6.03057134s" podCreationTimestamp="2025-11-22 05:06:24 +0000 UTC" firstStartedPulling="2025-11-22 05:06:25.264233982 +0000 UTC m=+1187.950244498" lastFinishedPulling="2025-11-22 05:06:27.681552577 +0000 UTC m=+1190.367563103" observedRunningTime="2025-11-22 05:06:30.024496698 +0000 UTC m=+1192.710507214" watchObservedRunningTime="2025-11-22 05:06:30.03057134 +0000 UTC m=+1192.716581876" Nov 22 05:06:34 crc kubenswrapper[4948]: I1122 05:06:34.794359 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:06:34 crc kubenswrapper[4948]: I1122 05:06:34.794970 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:06:34 crc kubenswrapper[4948]: I1122 05:06:34.828684 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:06:35 crc kubenswrapper[4948]: I1122 05:06:35.107249 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:06:37 crc kubenswrapper[4948]: I1122 05:06:37.724253 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2"] Nov 22 05:06:37 crc kubenswrapper[4948]: I1122 05:06:37.727715 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:37 crc kubenswrapper[4948]: I1122 05:06:37.730904 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-77z46" Nov 22 05:06:37 crc kubenswrapper[4948]: I1122 05:06:37.757413 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2"] Nov 22 05:06:37 crc kubenswrapper[4948]: I1122 05:06:37.917027 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-bundle\") pod \"8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:37 crc kubenswrapper[4948]: I1122 05:06:37.917154 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfrvp\" (UniqueName: \"kubernetes.io/projected/2f1c28d6-a954-42f2-9313-3c35189441bf-kube-api-access-jfrvp\") pod \"8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:37 crc kubenswrapper[4948]: I1122 05:06:37.917192 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-util\") pod \"8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:38 crc kubenswrapper[4948]: I1122 05:06:38.019152 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-bundle\") pod \"8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:38 crc kubenswrapper[4948]: I1122 05:06:38.019748 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfrvp\" (UniqueName: \"kubernetes.io/projected/2f1c28d6-a954-42f2-9313-3c35189441bf-kube-api-access-jfrvp\") pod \"8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:38 crc kubenswrapper[4948]: I1122 05:06:38.019794 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-bundle\") pod \"8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:38 crc kubenswrapper[4948]: I1122 05:06:38.020065 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-util\") pod \"8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:38 crc kubenswrapper[4948]: I1122 05:06:38.020439 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-util\") pod \"8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:38 crc kubenswrapper[4948]: I1122 05:06:38.035813 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfrvp\" (UniqueName: \"kubernetes.io/projected/2f1c28d6-a954-42f2-9313-3c35189441bf-kube-api-access-jfrvp\") pod \"8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:38 crc kubenswrapper[4948]: I1122 05:06:38.052674 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:38 crc kubenswrapper[4948]: I1122 05:06:38.255971 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2"] Nov 22 05:06:39 crc kubenswrapper[4948]: I1122 05:06:39.097198 4948 generic.go:334] "Generic (PLEG): container finished" podID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerID="b0ec0f8e16ca129f740a2cbc1055abaf33e10320a61b4b39de9c90b75b0bbd36" exitCode=0 Nov 22 05:06:39 crc kubenswrapper[4948]: I1122 05:06:39.097294 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" event={"ID":"2f1c28d6-a954-42f2-9313-3c35189441bf","Type":"ContainerDied","Data":"b0ec0f8e16ca129f740a2cbc1055abaf33e10320a61b4b39de9c90b75b0bbd36"} Nov 22 05:06:39 crc kubenswrapper[4948]: I1122 05:06:39.097654 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" event={"ID":"2f1c28d6-a954-42f2-9313-3c35189441bf","Type":"ContainerStarted","Data":"4740fa5c703cbd8c9758c3540ade0c59af8668abf124132fa51ff5d3194efbef"} Nov 22 05:06:41 crc kubenswrapper[4948]: I1122 05:06:41.115375 4948 generic.go:334] "Generic (PLEG): container finished" podID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerID="21d58ebe739638e60ac63c8c62e34bc9b1be3ab2de4fdf0ede14b6342a302623" exitCode=0 Nov 22 05:06:41 crc kubenswrapper[4948]: I1122 05:06:41.115552 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" event={"ID":"2f1c28d6-a954-42f2-9313-3c35189441bf","Type":"ContainerDied","Data":"21d58ebe739638e60ac63c8c62e34bc9b1be3ab2de4fdf0ede14b6342a302623"} Nov 22 05:06:42 crc kubenswrapper[4948]: I1122 05:06:42.125383 4948 generic.go:334] "Generic (PLEG): container finished" podID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerID="b24bcd9db4b450e8141e73cb00a9c2ea5b3440f71607cf386fcb14af33430722" exitCode=0 Nov 22 05:06:42 crc kubenswrapper[4948]: I1122 05:06:42.125435 4948 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" event={"ID":"2f1c28d6-a954-42f2-9313-3c35189441bf","Type":"ContainerDied","Data":"b24bcd9db4b450e8141e73cb00a9c2ea5b3440f71607cf386fcb14af33430722"} Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.376634 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.510596 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-bundle\") pod \"2f1c28d6-a954-42f2-9313-3c35189441bf\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.510667 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-util\") pod \"2f1c28d6-a954-42f2-9313-3c35189441bf\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.510706 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfrvp\" (UniqueName: \"kubernetes.io/projected/2f1c28d6-a954-42f2-9313-3c35189441bf-kube-api-access-jfrvp\") pod \"2f1c28d6-a954-42f2-9313-3c35189441bf\" (UID: \"2f1c28d6-a954-42f2-9313-3c35189441bf\") " Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.511600 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-bundle" (OuterVolumeSpecName: "bundle") pod "2f1c28d6-a954-42f2-9313-3c35189441bf" (UID: "2f1c28d6-a954-42f2-9313-3c35189441bf"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.519251 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f1c28d6-a954-42f2-9313-3c35189441bf-kube-api-access-jfrvp" (OuterVolumeSpecName: "kube-api-access-jfrvp") pod "2f1c28d6-a954-42f2-9313-3c35189441bf" (UID: "2f1c28d6-a954-42f2-9313-3c35189441bf"). InnerVolumeSpecName "kube-api-access-jfrvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.525735 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-util" (OuterVolumeSpecName: "util") pod "2f1c28d6-a954-42f2-9313-3c35189441bf" (UID: "2f1c28d6-a954-42f2-9313-3c35189441bf"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.612264 4948 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.612304 4948 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2f1c28d6-a954-42f2-9313-3c35189441bf-util\") on node \"crc\" DevicePath \"\"" Nov 22 05:06:43 crc kubenswrapper[4948]: I1122 05:06:43.612317 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfrvp\" (UniqueName: \"kubernetes.io/projected/2f1c28d6-a954-42f2-9313-3c35189441bf-kube-api-access-jfrvp\") on node \"crc\" DevicePath \"\"" Nov 22 05:06:44 crc kubenswrapper[4948]: I1122 05:06:44.142339 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" event={"ID":"2f1c28d6-a954-42f2-9313-3c35189441bf","Type":"ContainerDied","Data":"4740fa5c703cbd8c9758c3540ade0c59af8668abf124132fa51ff5d3194efbef"} Nov 22 05:06:44 crc kubenswrapper[4948]: I1122 05:06:44.142400 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4740fa5c703cbd8c9758c3540ade0c59af8668abf124132fa51ff5d3194efbef" Nov 22 05:06:44 crc kubenswrapper[4948]: I1122 05:06:44.142427 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.683895 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs"] Nov 22 05:06:52 crc kubenswrapper[4948]: E1122 05:06:52.684843 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerName="extract" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.684858 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerName="extract" Nov 22 05:06:52 crc kubenswrapper[4948]: E1122 05:06:52.684874 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerName="util" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.684882 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerName="util" Nov 22 05:06:52 crc kubenswrapper[4948]: E1122 05:06:52.684895 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerName="pull" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.684903 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerName="pull" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.685075 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f1c28d6-a954-42f2-9313-3c35189441bf" containerName="extract" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.685976 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.687956 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-service-cert" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.687972 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-4sbpb" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.698100 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs"] Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.766625 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-apiservice-cert\") pod \"manila-operator-controller-manager-7d5c54747-f2qjs\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.766672 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnr4g\" (UniqueName: \"kubernetes.io/projected/4704d599-bafd-404a-96b3-9cf06bf0658f-kube-api-access-dnr4g\") pod \"manila-operator-controller-manager-7d5c54747-f2qjs\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.766708 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-webhook-cert\") pod \"manila-operator-controller-manager-7d5c54747-f2qjs\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.868013 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-webhook-cert\") pod \"manila-operator-controller-manager-7d5c54747-f2qjs\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.868129 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-apiservice-cert\") pod \"manila-operator-controller-manager-7d5c54747-f2qjs\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.868152 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnr4g\" (UniqueName: \"kubernetes.io/projected/4704d599-bafd-404a-96b3-9cf06bf0658f-kube-api-access-dnr4g\") pod \"manila-operator-controller-manager-7d5c54747-f2qjs\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.875239 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-apiservice-cert\") pod \"manila-operator-controller-manager-7d5c54747-f2qjs\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.875239 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-webhook-cert\") pod \"manila-operator-controller-manager-7d5c54747-f2qjs\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:52 crc kubenswrapper[4948]: I1122 05:06:52.885180 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnr4g\" (UniqueName: \"kubernetes.io/projected/4704d599-bafd-404a-96b3-9cf06bf0658f-kube-api-access-dnr4g\") pod \"manila-operator-controller-manager-7d5c54747-f2qjs\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:53 crc kubenswrapper[4948]: I1122 05:06:53.010281 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:53 crc kubenswrapper[4948]: I1122 05:06:53.492825 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs"] Nov 22 05:06:53 crc kubenswrapper[4948]: W1122 05:06:53.498076 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4704d599_bafd_404a_96b3_9cf06bf0658f.slice/crio-40af8d95214022807ab40bf868d0076fc09b928929d77f43a916240ee2fb3838 WatchSource:0}: Error finding container 40af8d95214022807ab40bf868d0076fc09b928929d77f43a916240ee2fb3838: Status 404 returned error can't find the container with id 40af8d95214022807ab40bf868d0076fc09b928929d77f43a916240ee2fb3838 Nov 22 05:06:54 crc kubenswrapper[4948]: I1122 05:06:54.259432 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" event={"ID":"4704d599-bafd-404a-96b3-9cf06bf0658f","Type":"ContainerStarted","Data":"40af8d95214022807ab40bf868d0076fc09b928929d77f43a916240ee2fb3838"} Nov 22 05:06:58 crc kubenswrapper[4948]: I1122 05:06:58.302969 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" event={"ID":"4704d599-bafd-404a-96b3-9cf06bf0658f","Type":"ContainerStarted","Data":"f1498ecab56492bae27357ec7b7d41d601e3f9c44a1eaade88b36ff137201781"} Nov 22 05:06:59 crc kubenswrapper[4948]: I1122 05:06:59.311167 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" event={"ID":"4704d599-bafd-404a-96b3-9cf06bf0658f","Type":"ContainerStarted","Data":"cf063af18bf18ab24249018e15a5eec38650eeaddd4610de0a547f77094e8f90"} Nov 22 05:06:59 crc kubenswrapper[4948]: I1122 05:06:59.312008 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:06:59 crc kubenswrapper[4948]: I1122 05:06:59.334755 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" podStartSLOduration=1.9397989610000002 podStartE2EDuration="7.334723546s" podCreationTimestamp="2025-11-22 05:06:52 +0000 UTC" firstStartedPulling="2025-11-22 05:06:53.500297109 +0000 UTC m=+1216.186307655" lastFinishedPulling="2025-11-22 05:06:58.895221724 +0000 UTC m=+1221.581232240" observedRunningTime="2025-11-22 05:06:59.329781975 +0000 UTC m=+1222.015792511" watchObservedRunningTime="2025-11-22 05:06:59.334723546 +0000 UTC m=+1222.020734062" Nov 22 05:07:03 crc kubenswrapper[4948]: I1122 05:07:03.014942 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:07:06 crc kubenswrapper[4948]: I1122 05:07:06.661448 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-db-create-qhh2s"] Nov 22 05:07:06 crc kubenswrapper[4948]: I1122 05:07:06.663530 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-create-qhh2s" Nov 22 05:07:06 crc kubenswrapper[4948]: I1122 05:07:06.674743 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-create-qhh2s"] Nov 22 05:07:06 crc kubenswrapper[4948]: I1122 05:07:06.682701 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g678n\" (UniqueName: \"kubernetes.io/projected/06c3bb7d-c3ce-42e2-a6c2-305893a6d59e-kube-api-access-g678n\") pod \"manila-db-create-qhh2s\" (UID: \"06c3bb7d-c3ce-42e2-a6c2-305893a6d59e\") " pod="manila-kuttl-tests/manila-db-create-qhh2s" Nov 22 05:07:06 crc kubenswrapper[4948]: I1122 05:07:06.783735 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g678n\" (UniqueName: \"kubernetes.io/projected/06c3bb7d-c3ce-42e2-a6c2-305893a6d59e-kube-api-access-g678n\") pod \"manila-db-create-qhh2s\" (UID: \"06c3bb7d-c3ce-42e2-a6c2-305893a6d59e\") " pod="manila-kuttl-tests/manila-db-create-qhh2s" Nov 22 05:07:06 crc kubenswrapper[4948]: I1122 05:07:06.801379 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g678n\" (UniqueName: \"kubernetes.io/projected/06c3bb7d-c3ce-42e2-a6c2-305893a6d59e-kube-api-access-g678n\") pod \"manila-db-create-qhh2s\" (UID: \"06c3bb7d-c3ce-42e2-a6c2-305893a6d59e\") " pod="manila-kuttl-tests/manila-db-create-qhh2s" Nov 22 05:07:06 crc kubenswrapper[4948]: I1122 05:07:06.982945 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-db-create-qhh2s" Nov 22 05:07:07 crc kubenswrapper[4948]: I1122 05:07:07.259714 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-create-qhh2s"] Nov 22 05:07:07 crc kubenswrapper[4948]: I1122 05:07:07.365596 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-qhh2s" event={"ID":"06c3bb7d-c3ce-42e2-a6c2-305893a6d59e","Type":"ContainerStarted","Data":"6d3118200b573a3816955aa7ccccd132056e90498a06e9cd546d6b85408f3e69"} Nov 22 05:07:08 crc kubenswrapper[4948]: I1122 05:07:08.372798 4948 generic.go:334] "Generic (PLEG): container finished" podID="06c3bb7d-c3ce-42e2-a6c2-305893a6d59e" containerID="55fd401c1439972a8275317a593c72c8504a4f4f26af687c001dc7360eb88831" exitCode=0 Nov 22 05:07:08 crc kubenswrapper[4948]: I1122 05:07:08.372842 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-qhh2s" event={"ID":"06c3bb7d-c3ce-42e2-a6c2-305893a6d59e","Type":"ContainerDied","Data":"55fd401c1439972a8275317a593c72c8504a4f4f26af687c001dc7360eb88831"} Nov 22 05:07:09 crc kubenswrapper[4948]: I1122 05:07:09.688384 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-create-qhh2s" Nov 22 05:07:09 crc kubenswrapper[4948]: I1122 05:07:09.724513 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g678n\" (UniqueName: \"kubernetes.io/projected/06c3bb7d-c3ce-42e2-a6c2-305893a6d59e-kube-api-access-g678n\") pod \"06c3bb7d-c3ce-42e2-a6c2-305893a6d59e\" (UID: \"06c3bb7d-c3ce-42e2-a6c2-305893a6d59e\") " Nov 22 05:07:09 crc kubenswrapper[4948]: I1122 05:07:09.730655 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06c3bb7d-c3ce-42e2-a6c2-305893a6d59e-kube-api-access-g678n" (OuterVolumeSpecName: "kube-api-access-g678n") pod "06c3bb7d-c3ce-42e2-a6c2-305893a6d59e" (UID: "06c3bb7d-c3ce-42e2-a6c2-305893a6d59e"). InnerVolumeSpecName "kube-api-access-g678n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:07:09 crc kubenswrapper[4948]: I1122 05:07:09.826723 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g678n\" (UniqueName: \"kubernetes.io/projected/06c3bb7d-c3ce-42e2-a6c2-305893a6d59e-kube-api-access-g678n\") on node \"crc\" DevicePath \"\"" Nov 22 05:07:10 crc kubenswrapper[4948]: I1122 05:07:10.390972 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-db-create-qhh2s" Nov 22 05:07:10 crc kubenswrapper[4948]: I1122 05:07:10.390914 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-qhh2s" event={"ID":"06c3bb7d-c3ce-42e2-a6c2-305893a6d59e","Type":"ContainerDied","Data":"6d3118200b573a3816955aa7ccccd132056e90498a06e9cd546d6b85408f3e69"} Nov 22 05:07:10 crc kubenswrapper[4948]: I1122 05:07:10.391161 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d3118200b573a3816955aa7ccccd132056e90498a06e9cd546d6b85408f3e69" Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.668979 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-3383-account-create-m88tv"] Nov 22 05:07:16 crc kubenswrapper[4948]: E1122 05:07:16.669873 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c3bb7d-c3ce-42e2-a6c2-305893a6d59e" containerName="mariadb-database-create" Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.669890 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c3bb7d-c3ce-42e2-a6c2-305893a6d59e" containerName="mariadb-database-create" Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.670046 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="06c3bb7d-c3ce-42e2-a6c2-305893a6d59e" containerName="mariadb-database-create" Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.670549 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-3383-account-create-m88tv" Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.672719 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-db-secret" Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.679698 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-3383-account-create-m88tv"] Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.723227 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp2d7\" (UniqueName: \"kubernetes.io/projected/abbc22fc-4f38-45dd-bc95-f7e5fc647919-kube-api-access-kp2d7\") pod \"manila-3383-account-create-m88tv\" (UID: \"abbc22fc-4f38-45dd-bc95-f7e5fc647919\") " pod="manila-kuttl-tests/manila-3383-account-create-m88tv" Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.830345 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp2d7\" (UniqueName: \"kubernetes.io/projected/abbc22fc-4f38-45dd-bc95-f7e5fc647919-kube-api-access-kp2d7\") pod \"manila-3383-account-create-m88tv\" (UID: \"abbc22fc-4f38-45dd-bc95-f7e5fc647919\") " pod="manila-kuttl-tests/manila-3383-account-create-m88tv" Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.867935 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp2d7\" (UniqueName: \"kubernetes.io/projected/abbc22fc-4f38-45dd-bc95-f7e5fc647919-kube-api-access-kp2d7\") pod \"manila-3383-account-create-m88tv\" (UID: \"abbc22fc-4f38-45dd-bc95-f7e5fc647919\") " pod="manila-kuttl-tests/manila-3383-account-create-m88tv" Nov 22 05:07:16 crc kubenswrapper[4948]: I1122 05:07:16.985719 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-3383-account-create-m88tv" Nov 22 05:07:17 crc kubenswrapper[4948]: I1122 05:07:17.379529 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-3383-account-create-m88tv"] Nov 22 05:07:17 crc kubenswrapper[4948]: I1122 05:07:17.444235 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-3383-account-create-m88tv" event={"ID":"abbc22fc-4f38-45dd-bc95-f7e5fc647919","Type":"ContainerStarted","Data":"0c254a6f6cd3d16c248f88bc24071d9cf32834ae4530d7e31755d6988a18bd2a"} Nov 22 05:07:18 crc kubenswrapper[4948]: I1122 05:07:18.452599 4948 generic.go:334] "Generic (PLEG): container finished" podID="abbc22fc-4f38-45dd-bc95-f7e5fc647919" containerID="c4bf0bfc3163e1d9781352303379fa1016309d6b188e6a786c7a5ed1e2f3ad0a" exitCode=0 Nov 22 05:07:18 crc kubenswrapper[4948]: I1122 05:07:18.452662 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-3383-account-create-m88tv" event={"ID":"abbc22fc-4f38-45dd-bc95-f7e5fc647919","Type":"ContainerDied","Data":"c4bf0bfc3163e1d9781352303379fa1016309d6b188e6a786c7a5ed1e2f3ad0a"} Nov 22 05:07:19 crc kubenswrapper[4948]: I1122 05:07:19.790446 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-3383-account-create-m88tv" Nov 22 05:07:19 crc kubenswrapper[4948]: I1122 05:07:19.969992 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp2d7\" (UniqueName: \"kubernetes.io/projected/abbc22fc-4f38-45dd-bc95-f7e5fc647919-kube-api-access-kp2d7\") pod \"abbc22fc-4f38-45dd-bc95-f7e5fc647919\" (UID: \"abbc22fc-4f38-45dd-bc95-f7e5fc647919\") " Nov 22 05:07:19 crc kubenswrapper[4948]: I1122 05:07:19.976585 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abbc22fc-4f38-45dd-bc95-f7e5fc647919-kube-api-access-kp2d7" (OuterVolumeSpecName: "kube-api-access-kp2d7") pod "abbc22fc-4f38-45dd-bc95-f7e5fc647919" (UID: "abbc22fc-4f38-45dd-bc95-f7e5fc647919"). InnerVolumeSpecName "kube-api-access-kp2d7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:07:20 crc kubenswrapper[4948]: I1122 05:07:20.072680 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp2d7\" (UniqueName: \"kubernetes.io/projected/abbc22fc-4f38-45dd-bc95-f7e5fc647919-kube-api-access-kp2d7\") on node \"crc\" DevicePath \"\"" Nov 22 05:07:20 crc kubenswrapper[4948]: I1122 05:07:20.470800 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-3383-account-create-m88tv" event={"ID":"abbc22fc-4f38-45dd-bc95-f7e5fc647919","Type":"ContainerDied","Data":"0c254a6f6cd3d16c248f88bc24071d9cf32834ae4530d7e31755d6988a18bd2a"} Nov 22 05:07:20 crc kubenswrapper[4948]: I1122 05:07:20.470861 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c254a6f6cd3d16c248f88bc24071d9cf32834ae4530d7e31755d6988a18bd2a" Nov 22 05:07:20 crc kubenswrapper[4948]: I1122 05:07:20.470914 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-3383-account-create-m88tv" Nov 22 05:07:21 crc kubenswrapper[4948]: I1122 05:07:21.892983 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-db-sync-pqnwf"] Nov 22 05:07:21 crc kubenswrapper[4948]: E1122 05:07:21.894534 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abbc22fc-4f38-45dd-bc95-f7e5fc647919" containerName="mariadb-account-create" Nov 22 05:07:21 crc kubenswrapper[4948]: I1122 05:07:21.894675 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="abbc22fc-4f38-45dd-bc95-f7e5fc647919" containerName="mariadb-account-create" Nov 22 05:07:21 crc kubenswrapper[4948]: I1122 05:07:21.894936 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="abbc22fc-4f38-45dd-bc95-f7e5fc647919" containerName="mariadb-account-create" Nov 22 05:07:21 crc kubenswrapper[4948]: I1122 05:07:21.895571 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:21 crc kubenswrapper[4948]: I1122 05:07:21.898644 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-config-data" Nov 22 05:07:21 crc kubenswrapper[4948]: I1122 05:07:21.898645 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-manila-dockercfg-kbpfg" Nov 22 05:07:21 crc kubenswrapper[4948]: I1122 05:07:21.922733 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-sync-pqnwf"] Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.000233 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8h25\" (UniqueName: \"kubernetes.io/projected/55019746-79bf-40ed-8fdd-a34adc3d8c02-kube-api-access-l8h25\") pod \"manila-db-sync-pqnwf\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.000313 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-config-data\") pod \"manila-db-sync-pqnwf\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.000738 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-job-config-data\") pod \"manila-db-sync-pqnwf\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.102337 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8h25\" (UniqueName: \"kubernetes.io/projected/55019746-79bf-40ed-8fdd-a34adc3d8c02-kube-api-access-l8h25\") pod \"manila-db-sync-pqnwf\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.102398 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-config-data\") pod \"manila-db-sync-pqnwf\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 
05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.102456 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-job-config-data\") pod \"manila-db-sync-pqnwf\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.106498 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-job-config-data\") pod \"manila-db-sync-pqnwf\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.107970 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-config-data\") pod \"manila-db-sync-pqnwf\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.124023 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8h25\" (UniqueName: \"kubernetes.io/projected/55019746-79bf-40ed-8fdd-a34adc3d8c02-kube-api-access-l8h25\") pod \"manila-db-sync-pqnwf\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.253311 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:07:22 crc kubenswrapper[4948]: I1122 05:07:22.484092 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-sync-pqnwf"] Nov 22 05:07:23 crc kubenswrapper[4948]: I1122 05:07:23.496952 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-pqnwf" event={"ID":"55019746-79bf-40ed-8fdd-a34adc3d8c02","Type":"ContainerStarted","Data":"f8a98d0e4f623402be621886dbd7b80c3762a176a03befffc465aba9fdb526a9"} Nov 22 05:07:29 crc kubenswrapper[4948]: I1122 05:07:29.789554 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:07:29 crc kubenswrapper[4948]: I1122 05:07:29.790055 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:07:32 crc kubenswrapper[4948]: I1122 05:07:32.558206 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-pqnwf" event={"ID":"55019746-79bf-40ed-8fdd-a34adc3d8c02","Type":"ContainerStarted","Data":"784d51b53f825d2356da5c935e856607f95f41d10d8310910cf6b8c6919c7b7b"} Nov 22 05:07:32 crc kubenswrapper[4948]: I1122 05:07:32.585848 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-db-sync-pqnwf" podStartSLOduration=2.355478319 podStartE2EDuration="11.585826697s" podCreationTimestamp="2025-11-22 05:07:21 +0000 UTC" 
firstStartedPulling="2025-11-22 05:07:22.499194523 +0000 UTC m=+1245.185205039" lastFinishedPulling="2025-11-22 05:07:31.729542891 +0000 UTC m=+1254.415553417" observedRunningTime="2025-11-22 05:07:32.576899374 +0000 UTC m=+1255.262909920" watchObservedRunningTime="2025-11-22 05:07:32.585826697 +0000 UTC m=+1255.271837213" Nov 22 05:07:59 crc kubenswrapper[4948]: I1122 05:07:59.789607 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:07:59 crc kubenswrapper[4948]: I1122 05:07:59.791338 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:08:11 crc kubenswrapper[4948]: I1122 05:08:11.864390 4948 generic.go:334] "Generic (PLEG): container finished" podID="55019746-79bf-40ed-8fdd-a34adc3d8c02" containerID="784d51b53f825d2356da5c935e856607f95f41d10d8310910cf6b8c6919c7b7b" exitCode=0 Nov 22 05:08:11 crc kubenswrapper[4948]: I1122 05:08:11.864450 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-pqnwf" event={"ID":"55019746-79bf-40ed-8fdd-a34adc3d8c02","Type":"ContainerDied","Data":"784d51b53f825d2356da5c935e856607f95f41d10d8310910cf6b8c6919c7b7b"} Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.124284 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.166218 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8h25\" (UniqueName: \"kubernetes.io/projected/55019746-79bf-40ed-8fdd-a34adc3d8c02-kube-api-access-l8h25\") pod \"55019746-79bf-40ed-8fdd-a34adc3d8c02\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.166257 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-config-data\") pod \"55019746-79bf-40ed-8fdd-a34adc3d8c02\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.166342 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-job-config-data\") pod \"55019746-79bf-40ed-8fdd-a34adc3d8c02\" (UID: \"55019746-79bf-40ed-8fdd-a34adc3d8c02\") " Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.173072 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "55019746-79bf-40ed-8fdd-a34adc3d8c02" (UID: "55019746-79bf-40ed-8fdd-a34adc3d8c02"). InnerVolumeSpecName "job-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.173844 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55019746-79bf-40ed-8fdd-a34adc3d8c02-kube-api-access-l8h25" (OuterVolumeSpecName: "kube-api-access-l8h25") pod "55019746-79bf-40ed-8fdd-a34adc3d8c02" (UID: "55019746-79bf-40ed-8fdd-a34adc3d8c02"). InnerVolumeSpecName "kube-api-access-l8h25". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.175250 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-config-data" (OuterVolumeSpecName: "config-data") pod "55019746-79bf-40ed-8fdd-a34adc3d8c02" (UID: "55019746-79bf-40ed-8fdd-a34adc3d8c02"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.268145 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8h25\" (UniqueName: \"kubernetes.io/projected/55019746-79bf-40ed-8fdd-a34adc3d8c02-kube-api-access-l8h25\") on node \"crc\" DevicePath \"\"" Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.268187 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.268199 4948 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/55019746-79bf-40ed-8fdd-a34adc3d8c02-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.881662 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-pqnwf" event={"ID":"55019746-79bf-40ed-8fdd-a34adc3d8c02","Type":"ContainerDied","Data":"f8a98d0e4f623402be621886dbd7b80c3762a176a03befffc465aba9fdb526a9"} Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.881953 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8a98d0e4f623402be621886dbd7b80c3762a176a03befffc465aba9fdb526a9" Nov 22 05:08:13 crc kubenswrapper[4948]: I1122 05:08:13.881757 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-pqnwf" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.229119 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:08:14 crc kubenswrapper[4948]: E1122 05:08:14.229807 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55019746-79bf-40ed-8fdd-a34adc3d8c02" containerName="manila-db-sync" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.229825 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="55019746-79bf-40ed-8fdd-a34adc3d8c02" containerName="manila-db-sync" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.230027 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="55019746-79bf-40ed-8fdd-a34adc3d8c02" containerName="manila-db-sync" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.230874 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.233049 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-scheduler-config-data" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.238884 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-config-data" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.240769 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.243013 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.243458 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-manila-dockercfg-kbpfg" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.244085 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-scripts" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.251371 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-share-share0-config-data" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.251562 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"ceph-conf-files" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.268278 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.277455 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.282638 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-scripts\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.282879 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-var-lib-manila\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.282907 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/506bf3aa-7e96-48b6-94dd-a3b1346cb464-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.282937 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data-custom\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.282967 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.282989 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.283010 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.283044 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlrrm\" (UniqueName: \"kubernetes.io/projected/e16f33c0-0a4c-48bf-8c2d-6828e2564977-kube-api-access-hlrrm\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.283070 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfnbp\" (UniqueName: \"kubernetes.io/projected/506bf3aa-7e96-48b6-94dd-a3b1346cb464-kube-api-access-jfnbp\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.283117 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-scripts\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.283141 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-etc-machine-id\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.283170 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-ceph\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.384829 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-scripts\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.384902 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" 
(UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-etc-machine-id\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.384938 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-ceph\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.384979 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-scripts\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.384998 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-var-lib-manila\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385023 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/506bf3aa-7e96-48b6-94dd-a3b1346cb464-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385051 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data-custom\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385094 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385118 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385139 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385175 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlrrm\" (UniqueName: \"kubernetes.io/projected/e16f33c0-0a4c-48bf-8c2d-6828e2564977-kube-api-access-hlrrm\") pod \"manila-share-share0-0\" (UID: 
\"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385197 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfnbp\" (UniqueName: \"kubernetes.io/projected/506bf3aa-7e96-48b6-94dd-a3b1346cb464-kube-api-access-jfnbp\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385614 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/506bf3aa-7e96-48b6-94dd-a3b1346cb464-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385761 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-etc-machine-id\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.385933 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-var-lib-manila\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.393360 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.393913 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.394183 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-scripts\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.395098 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-ceph\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.396040 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-scripts\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.397007 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.403182 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data-custom\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.423142 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlrrm\" (UniqueName: \"kubernetes.io/projected/e16f33c0-0a4c-48bf-8c2d-6828e2564977-kube-api-access-hlrrm\") pod \"manila-share-share0-0\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.439258 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfnbp\" (UniqueName: \"kubernetes.io/projected/506bf3aa-7e96-48b6-94dd-a3b1346cb464-kube-api-access-jfnbp\") pod \"manila-scheduler-0\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.543677 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.544698 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.547357 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-api-config-data" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.563918 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.587146 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.587203 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data-custom\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.587223 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-scripts\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.587295 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0db17102-ad8c-40d5-8ff3-68eed833b9e1-logs\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 
05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.587320 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psmp5\" (UniqueName: \"kubernetes.io/projected/0db17102-ad8c-40d5-8ff3-68eed833b9e1-kube-api-access-psmp5\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.587340 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0db17102-ad8c-40d5-8ff3-68eed833b9e1-etc-machine-id\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.590733 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.596086 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.688241 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.688309 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data-custom\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.688339 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-scripts\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.688381 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0db17102-ad8c-40d5-8ff3-68eed833b9e1-logs\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.688413 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psmp5\" (UniqueName: \"kubernetes.io/projected/0db17102-ad8c-40d5-8ff3-68eed833b9e1-kube-api-access-psmp5\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.688448 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0db17102-ad8c-40d5-8ff3-68eed833b9e1-etc-machine-id\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.688581 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/0db17102-ad8c-40d5-8ff3-68eed833b9e1-etc-machine-id\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.689911 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0db17102-ad8c-40d5-8ff3-68eed833b9e1-logs\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.693141 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-scripts\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.693353 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data-custom\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.693455 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.722122 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psmp5\" (UniqueName: \"kubernetes.io/projected/0db17102-ad8c-40d5-8ff3-68eed833b9e1-kube-api-access-psmp5\") pod \"manila-api-0\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.835031 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.860635 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.885143 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:08:14 crc kubenswrapper[4948]: W1122 05:08:14.895455 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode16f33c0_0a4c_48bf_8c2d_6828e2564977.slice/crio-e953fdfe9dd5aa1a97693c266c020a09a78e77f9f14e396b98667daea4d267db WatchSource:0}: Error finding container e953fdfe9dd5aa1a97693c266c020a09a78e77f9f14e396b98667daea4d267db: Status 404 returned error can't find the container with id e953fdfe9dd5aa1a97693c266c020a09a78e77f9f14e396b98667daea4d267db Nov 22 05:08:14 crc kubenswrapper[4948]: I1122 05:08:14.897008 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"506bf3aa-7e96-48b6-94dd-a3b1346cb464","Type":"ContainerStarted","Data":"0dddafcbab0c496104b537a561af6045447657c5aaabfed018ef5d80932dce98"} Nov 22 05:08:15 crc kubenswrapper[4948]: I1122 05:08:15.070210 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:08:15 crc kubenswrapper[4948]: I1122 05:08:15.915659 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"e16f33c0-0a4c-48bf-8c2d-6828e2564977","Type":"ContainerStarted","Data":"e953fdfe9dd5aa1a97693c266c020a09a78e77f9f14e396b98667daea4d267db"} Nov 22 05:08:15 crc kubenswrapper[4948]: I1122 05:08:15.918606 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"0db17102-ad8c-40d5-8ff3-68eed833b9e1","Type":"ContainerStarted","Data":"4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62"} Nov 22 05:08:15 crc kubenswrapper[4948]: I1122 05:08:15.918657 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"0db17102-ad8c-40d5-8ff3-68eed833b9e1","Type":"ContainerStarted","Data":"88d18faafbd9ca01ccd5fa3bcccc11f81cf9a517d1dd26bbf01e77a58fb48e5b"} Nov 22 05:08:16 crc kubenswrapper[4948]: I1122 05:08:16.928246 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"0db17102-ad8c-40d5-8ff3-68eed833b9e1","Type":"ContainerStarted","Data":"5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134"} Nov 22 05:08:16 crc kubenswrapper[4948]: I1122 05:08:16.928612 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:16 crc kubenswrapper[4948]: I1122 05:08:16.932537 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"506bf3aa-7e96-48b6-94dd-a3b1346cb464","Type":"ContainerStarted","Data":"7b5b40df80a167d5dfee079706b15f8ca911922b576902a45ddf9d49f221c373"} Nov 22 05:08:16 crc kubenswrapper[4948]: I1122 05:08:16.932572 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"506bf3aa-7e96-48b6-94dd-a3b1346cb464","Type":"ContainerStarted","Data":"f66f84fa7edcad39daec92e0d5390fd479721280760f42e16981eff1011e8372"} Nov 22 05:08:16 crc kubenswrapper[4948]: I1122 05:08:16.953550 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-api-0" podStartSLOduration=2.953535285 podStartE2EDuration="2.953535285s" podCreationTimestamp="2025-11-22 
05:08:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:08:16.950701834 +0000 UTC m=+1299.636712350" watchObservedRunningTime="2025-11-22 05:08:16.953535285 +0000 UTC m=+1299.639545801" Nov 22 05:08:16 crc kubenswrapper[4948]: I1122 05:08:16.978221 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-scheduler-0" podStartSLOduration=2.228412525 podStartE2EDuration="2.978192573s" podCreationTimestamp="2025-11-22 05:08:14 +0000 UTC" firstStartedPulling="2025-11-22 05:08:14.855149339 +0000 UTC m=+1297.541159865" lastFinishedPulling="2025-11-22 05:08:15.604929397 +0000 UTC m=+1298.290939913" observedRunningTime="2025-11-22 05:08:16.977973507 +0000 UTC m=+1299.663984023" watchObservedRunningTime="2025-11-22 05:08:16.978192573 +0000 UTC m=+1299.664203089" Nov 22 05:08:20 crc kubenswrapper[4948]: I1122 05:08:20.992197 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"e16f33c0-0a4c-48bf-8c2d-6828e2564977","Type":"ContainerStarted","Data":"707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa"} Nov 22 05:08:20 crc kubenswrapper[4948]: I1122 05:08:20.998129 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"e16f33c0-0a4c-48bf-8c2d-6828e2564977","Type":"ContainerStarted","Data":"5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b"} Nov 22 05:08:21 crc kubenswrapper[4948]: I1122 05:08:21.027853 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-share-share0-0" podStartSLOduration=1.899908675 podStartE2EDuration="7.027821376s" podCreationTimestamp="2025-11-22 05:08:14 +0000 UTC" firstStartedPulling="2025-11-22 05:08:14.897515019 +0000 UTC m=+1297.583525535" lastFinishedPulling="2025-11-22 05:08:20.02542773 +0000 UTC m=+1302.711438236" observedRunningTime="2025-11-22 05:08:21.019810859 +0000 UTC m=+1303.705821435" watchObservedRunningTime="2025-11-22 05:08:21.027821376 +0000 UTC m=+1303.713831932" Nov 22 05:08:24 crc kubenswrapper[4948]: I1122 05:08:24.591104 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:24 crc kubenswrapper[4948]: I1122 05:08:24.597070 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:29 crc kubenswrapper[4948]: I1122 05:08:29.789788 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:08:29 crc kubenswrapper[4948]: I1122 05:08:29.791088 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:08:29 crc kubenswrapper[4948]: I1122 05:08:29.791195 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 05:08:29 crc kubenswrapper[4948]: I1122 
05:08:29.791838 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a0943f4e445f12f6dc5d4e849eee67dd81bfc6359d81ae42402f37fb54747939"} pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 05:08:29 crc kubenswrapper[4948]: I1122 05:08:29.791981 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" containerID="cri-o://a0943f4e445f12f6dc5d4e849eee67dd81bfc6359d81ae42402f37fb54747939" gracePeriod=600 Nov 22 05:08:30 crc kubenswrapper[4948]: I1122 05:08:30.059536 4948 generic.go:334] "Generic (PLEG): container finished" podID="126f010b-a640-4133-b63f-d2976da99215" containerID="a0943f4e445f12f6dc5d4e849eee67dd81bfc6359d81ae42402f37fb54747939" exitCode=0 Nov 22 05:08:30 crc kubenswrapper[4948]: I1122 05:08:30.059598 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerDied","Data":"a0943f4e445f12f6dc5d4e849eee67dd81bfc6359d81ae42402f37fb54747939"} Nov 22 05:08:30 crc kubenswrapper[4948]: I1122 05:08:30.059899 4948 scope.go:117] "RemoveContainer" containerID="82cd1f7ac46cb027948972361fb8a42fe8301bdb56d8d047033cf856f1c72a5c" Nov 22 05:08:31 crc kubenswrapper[4948]: I1122 05:08:31.068438 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"23c94a6fcacee9c3faebd2019427dd27c2a4acb1f102d2cd48e6ac38c0f38971"} Nov 22 05:08:36 crc kubenswrapper[4948]: I1122 05:08:36.135279 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:08:36 crc kubenswrapper[4948]: I1122 05:08:36.206331 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:08:36 crc kubenswrapper[4948]: I1122 05:08:36.334574 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.379905 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-api-2"] Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.381603 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.386325 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-api-1"] Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.391072 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.400425 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-2"] Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.417408 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-1"] Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527254 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-etc-machine-id\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527299 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqs9k\" (UniqueName: \"kubernetes.io/projected/ae0bfa4e-c097-4a05-ab4f-2e951377d066-kube-api-access-zqs9k\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527332 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527387 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data-custom\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527406 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-scripts\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527444 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ae0bfa4e-c097-4a05-ab4f-2e951377d066-etc-machine-id\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527493 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae0bfa4e-c097-4a05-ab4f-2e951377d066-logs\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527571 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527734 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data-custom\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527776 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-scripts\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527804 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-logs\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.527845 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7l5t\" (UniqueName: \"kubernetes.io/projected/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-kube-api-access-b7l5t\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629185 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629276 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data-custom\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629318 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-scripts\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629351 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ae0bfa4e-c097-4a05-ab4f-2e951377d066-etc-machine-id\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629377 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae0bfa4e-c097-4a05-ab4f-2e951377d066-logs\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629396 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " 
pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629448 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data-custom\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629496 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-scripts\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629523 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-logs\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629517 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ae0bfa4e-c097-4a05-ab4f-2e951377d066-etc-machine-id\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629552 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7l5t\" (UniqueName: \"kubernetes.io/projected/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-kube-api-access-b7l5t\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629605 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-etc-machine-id\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.629633 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqs9k\" (UniqueName: \"kubernetes.io/projected/ae0bfa4e-c097-4a05-ab4f-2e951377d066-kube-api-access-zqs9k\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.630134 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-etc-machine-id\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.630208 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae0bfa4e-c097-4a05-ab4f-2e951377d066-logs\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.630342 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-logs\") pod \"manila-api-1\" (UID: 
\"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.636242 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data-custom\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.636422 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.638052 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-scripts\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.643369 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-scripts\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.643596 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data-custom\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.643950 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.648783 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7l5t\" (UniqueName: \"kubernetes.io/projected/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-kube-api-access-b7l5t\") pod \"manila-api-1\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.657905 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqs9k\" (UniqueName: \"kubernetes.io/projected/ae0bfa4e-c097-4a05-ab4f-2e951377d066-kube-api-access-zqs9k\") pod \"manila-api-2\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.708131 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.729958 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:37 crc kubenswrapper[4948]: I1122 05:08:37.964577 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-2"] Nov 22 05:08:37 crc kubenswrapper[4948]: W1122 05:08:37.977590 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae0bfa4e_c097_4a05_ab4f_2e951377d066.slice/crio-38633197deecd411ff33bbd742f7c2f0663485feff3fac8e2818d495c6c57e1d WatchSource:0}: Error finding container 38633197deecd411ff33bbd742f7c2f0663485feff3fac8e2818d495c6c57e1d: Status 404 returned error can't find the container with id 38633197deecd411ff33bbd742f7c2f0663485feff3fac8e2818d495c6c57e1d Nov 22 05:08:38 crc kubenswrapper[4948]: I1122 05:08:38.003364 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-1"] Nov 22 05:08:38 crc kubenswrapper[4948]: W1122 05:08:38.005163 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8435ae57_6365_4f3d_b5ea_c5eb7ab50961.slice/crio-d2709381e816f011b2b38af7bbc49927b1cfc05bb2fd9f4a2ae258e34c120144 WatchSource:0}: Error finding container d2709381e816f011b2b38af7bbc49927b1cfc05bb2fd9f4a2ae258e34c120144: Status 404 returned error can't find the container with id d2709381e816f011b2b38af7bbc49927b1cfc05bb2fd9f4a2ae258e34c120144 Nov 22 05:08:38 crc kubenswrapper[4948]: I1122 05:08:38.137773 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-2" event={"ID":"ae0bfa4e-c097-4a05-ab4f-2e951377d066","Type":"ContainerStarted","Data":"38633197deecd411ff33bbd742f7c2f0663485feff3fac8e2818d495c6c57e1d"} Nov 22 05:08:38 crc kubenswrapper[4948]: I1122 05:08:38.141947 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-1" event={"ID":"8435ae57-6365-4f3d-b5ea-c5eb7ab50961","Type":"ContainerStarted","Data":"d2709381e816f011b2b38af7bbc49927b1cfc05bb2fd9f4a2ae258e34c120144"} Nov 22 05:08:39 crc kubenswrapper[4948]: I1122 05:08:39.153120 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-1" event={"ID":"8435ae57-6365-4f3d-b5ea-c5eb7ab50961","Type":"ContainerStarted","Data":"1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35"} Nov 22 05:08:39 crc kubenswrapper[4948]: I1122 05:08:39.154582 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-1" event={"ID":"8435ae57-6365-4f3d-b5ea-c5eb7ab50961","Type":"ContainerStarted","Data":"ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba"} Nov 22 05:08:39 crc kubenswrapper[4948]: I1122 05:08:39.154692 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:39 crc kubenswrapper[4948]: I1122 05:08:39.155073 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-2" event={"ID":"ae0bfa4e-c097-4a05-ab4f-2e951377d066","Type":"ContainerStarted","Data":"ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909"} Nov 22 05:08:39 crc kubenswrapper[4948]: I1122 05:08:39.155117 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-2" event={"ID":"ae0bfa4e-c097-4a05-ab4f-2e951377d066","Type":"ContainerStarted","Data":"848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223"} Nov 22 05:08:39 crc kubenswrapper[4948]: I1122 05:08:39.155445 4948 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/manila-api-2" Nov 22 05:08:39 crc kubenswrapper[4948]: I1122 05:08:39.194034 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-api-1" podStartSLOduration=2.194014137 podStartE2EDuration="2.194014137s" podCreationTimestamp="2025-11-22 05:08:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:08:39.177379215 +0000 UTC m=+1321.863389751" watchObservedRunningTime="2025-11-22 05:08:39.194014137 +0000 UTC m=+1321.880024653" Nov 22 05:08:39 crc kubenswrapper[4948]: I1122 05:08:39.199407 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-api-2" podStartSLOduration=2.199385509 podStartE2EDuration="2.199385509s" podCreationTimestamp="2025-11-22 05:08:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:08:39.192756991 +0000 UTC m=+1321.878767527" watchObservedRunningTime="2025-11-22 05:08:39.199385509 +0000 UTC m=+1321.885396025" Nov 22 05:08:58 crc kubenswrapper[4948]: I1122 05:08:58.988978 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/manila-api-1" Nov 22 05:08:59 crc kubenswrapper[4948]: I1122 05:08:59.084045 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/manila-api-2" Nov 22 05:09:00 crc kubenswrapper[4948]: I1122 05:09:00.210564 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-2"] Nov 22 05:09:00 crc kubenswrapper[4948]: I1122 05:09:00.211288 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-2" podUID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerName="manila-api-log" containerID="cri-o://848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223" gracePeriod=30 Nov 22 05:09:00 crc kubenswrapper[4948]: I1122 05:09:00.211393 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-2" podUID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerName="manila-api" containerID="cri-o://ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909" gracePeriod=30 Nov 22 05:09:00 crc kubenswrapper[4948]: I1122 05:09:00.223956 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-1"] Nov 22 05:09:00 crc kubenswrapper[4948]: I1122 05:09:00.224208 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-1" podUID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" containerName="manila-api-log" containerID="cri-o://ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba" gracePeriod=30 Nov 22 05:09:00 crc kubenswrapper[4948]: I1122 05:09:00.224315 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-1" podUID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" containerName="manila-api" containerID="cri-o://1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35" gracePeriod=30 Nov 22 05:09:00 crc kubenswrapper[4948]: I1122 05:09:00.355807 4948 generic.go:334] "Generic (PLEG): container finished" podID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerID="848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223" 
exitCode=143 Nov 22 05:09:00 crc kubenswrapper[4948]: I1122 05:09:00.356102 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-2" event={"ID":"ae0bfa4e-c097-4a05-ab4f-2e951377d066","Type":"ContainerDied","Data":"848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223"} Nov 22 05:09:01 crc kubenswrapper[4948]: I1122 05:09:01.364730 4948 generic.go:334] "Generic (PLEG): container finished" podID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" containerID="ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba" exitCode=143 Nov 22 05:09:01 crc kubenswrapper[4948]: I1122 05:09:01.364790 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-1" event={"ID":"8435ae57-6365-4f3d-b5ea-c5eb7ab50961","Type":"ContainerDied","Data":"ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba"} Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.823572 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-2" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.829089 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-1" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.969711 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqs9k\" (UniqueName: \"kubernetes.io/projected/ae0bfa4e-c097-4a05-ab4f-2e951377d066-kube-api-access-zqs9k\") pod \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.969804 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data\") pod \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.969840 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae0bfa4e-c097-4a05-ab4f-2e951377d066-logs\") pod \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.969876 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data\") pod \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.969920 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data-custom\") pod \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.969953 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-scripts\") pod \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.970007 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-scripts\") pod \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.970074 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-etc-machine-id\") pod \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.970105 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-logs\") pod \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.970132 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data-custom\") pod \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.970181 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ae0bfa4e-c097-4a05-ab4f-2e951377d066-etc-machine-id\") pod \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\" (UID: \"ae0bfa4e-c097-4a05-ab4f-2e951377d066\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.970227 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7l5t\" (UniqueName: \"kubernetes.io/projected/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-kube-api-access-b7l5t\") pod \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\" (UID: \"8435ae57-6365-4f3d-b5ea-c5eb7ab50961\") " Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.970915 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8435ae57-6365-4f3d-b5ea-c5eb7ab50961" (UID: "8435ae57-6365-4f3d-b5ea-c5eb7ab50961"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.971098 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ae0bfa4e-c097-4a05-ab4f-2e951377d066-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ae0bfa4e-c097-4a05-ab4f-2e951377d066" (UID: "ae0bfa4e-c097-4a05-ab4f-2e951377d066"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.971335 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-logs" (OuterVolumeSpecName: "logs") pod "8435ae57-6365-4f3d-b5ea-c5eb7ab50961" (UID: "8435ae57-6365-4f3d-b5ea-c5eb7ab50961"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.971351 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae0bfa4e-c097-4a05-ab4f-2e951377d066-logs" (OuterVolumeSpecName: "logs") pod "ae0bfa4e-c097-4a05-ab4f-2e951377d066" (UID: "ae0bfa4e-c097-4a05-ab4f-2e951377d066"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.976867 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-scripts" (OuterVolumeSpecName: "scripts") pod "8435ae57-6365-4f3d-b5ea-c5eb7ab50961" (UID: "8435ae57-6365-4f3d-b5ea-c5eb7ab50961"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.976946 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8435ae57-6365-4f3d-b5ea-c5eb7ab50961" (UID: "8435ae57-6365-4f3d-b5ea-c5eb7ab50961"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.976997 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-scripts" (OuterVolumeSpecName: "scripts") pod "ae0bfa4e-c097-4a05-ab4f-2e951377d066" (UID: "ae0bfa4e-c097-4a05-ab4f-2e951377d066"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.977140 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ae0bfa4e-c097-4a05-ab4f-2e951377d066" (UID: "ae0bfa4e-c097-4a05-ab4f-2e951377d066"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.977237 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-kube-api-access-b7l5t" (OuterVolumeSpecName: "kube-api-access-b7l5t") pod "8435ae57-6365-4f3d-b5ea-c5eb7ab50961" (UID: "8435ae57-6365-4f3d-b5ea-c5eb7ab50961"). InnerVolumeSpecName "kube-api-access-b7l5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:09:03 crc kubenswrapper[4948]: I1122 05:09:03.981645 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae0bfa4e-c097-4a05-ab4f-2e951377d066-kube-api-access-zqs9k" (OuterVolumeSpecName: "kube-api-access-zqs9k") pod "ae0bfa4e-c097-4a05-ab4f-2e951377d066" (UID: "ae0bfa4e-c097-4a05-ab4f-2e951377d066"). InnerVolumeSpecName "kube-api-access-zqs9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.016395 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data" (OuterVolumeSpecName: "config-data") pod "8435ae57-6365-4f3d-b5ea-c5eb7ab50961" (UID: "8435ae57-6365-4f3d-b5ea-c5eb7ab50961"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.021352 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data" (OuterVolumeSpecName: "config-data") pod "ae0bfa4e-c097-4a05-ab4f-2e951377d066" (UID: "ae0bfa4e-c097-4a05-ab4f-2e951377d066"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073072 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073113 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073127 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae0bfa4e-c097-4a05-ab4f-2e951377d066-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073139 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073151 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073163 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-logs\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073175 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073186 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ae0bfa4e-c097-4a05-ab4f-2e951377d066-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073197 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7l5t\" (UniqueName: \"kubernetes.io/projected/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-kube-api-access-b7l5t\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073209 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqs9k\" (UniqueName: \"kubernetes.io/projected/ae0bfa4e-c097-4a05-ab4f-2e951377d066-kube-api-access-zqs9k\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073220 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8435ae57-6365-4f3d-b5ea-c5eb7ab50961-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.073231 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/ae0bfa4e-c097-4a05-ab4f-2e951377d066-logs\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.393506 4948 generic.go:334] "Generic (PLEG): container finished" podID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerID="ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909" exitCode=0 Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.393600 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-2" event={"ID":"ae0bfa4e-c097-4a05-ab4f-2e951377d066","Type":"ContainerDied","Data":"ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909"} Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.393643 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-2" event={"ID":"ae0bfa4e-c097-4a05-ab4f-2e951377d066","Type":"ContainerDied","Data":"38633197deecd411ff33bbd742f7c2f0663485feff3fac8e2818d495c6c57e1d"} Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.393671 4948 scope.go:117] "RemoveContainer" containerID="ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.393882 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-2" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.397679 4948 generic.go:334] "Generic (PLEG): container finished" podID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" containerID="1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35" exitCode=0 Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.397774 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-1" event={"ID":"8435ae57-6365-4f3d-b5ea-c5eb7ab50961","Type":"ContainerDied","Data":"1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35"} Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.397834 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-1" event={"ID":"8435ae57-6365-4f3d-b5ea-c5eb7ab50961","Type":"ContainerDied","Data":"d2709381e816f011b2b38af7bbc49927b1cfc05bb2fd9f4a2ae258e34c120144"} Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.397862 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-api-1" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.427695 4948 scope.go:117] "RemoveContainer" containerID="848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.467175 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-1"] Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.482267 4948 scope.go:117] "RemoveContainer" containerID="ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909" Nov 22 05:09:04 crc kubenswrapper[4948]: E1122 05:09:04.482999 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909\": container with ID starting with ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909 not found: ID does not exist" containerID="ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.483057 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909"} err="failed to get container status \"ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909\": rpc error: code = NotFound desc = could not find container \"ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909\": container with ID starting with ff68b715e81e6af592e51ba0e8899885d3445710fa15592651b6f1af2d4e6909 not found: ID does not exist" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.483108 4948 scope.go:117] "RemoveContainer" containerID="848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223" Nov 22 05:09:04 crc kubenswrapper[4948]: E1122 05:09:04.484006 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223\": container with ID starting with 848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223 not found: ID does not exist" containerID="848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.484043 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223"} err="failed to get container status \"848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223\": rpc error: code = NotFound desc = could not find container \"848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223\": container with ID starting with 848f37187590ddbcc568af8edf160d22a8e6b5630138f2c185cd7261507df223 not found: ID does not exist" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.484072 4948 scope.go:117] "RemoveContainer" containerID="1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.490129 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-api-1"] Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.503684 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-2"] Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.511120 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-api-2"] Nov 22 05:09:04 crc 
kubenswrapper[4948]: I1122 05:09:04.518866 4948 scope.go:117] "RemoveContainer" containerID="ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.539679 4948 scope.go:117] "RemoveContainer" containerID="1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35" Nov 22 05:09:04 crc kubenswrapper[4948]: E1122 05:09:04.540222 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35\": container with ID starting with 1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35 not found: ID does not exist" containerID="1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.540282 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35"} err="failed to get container status \"1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35\": rpc error: code = NotFound desc = could not find container \"1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35\": container with ID starting with 1daf42c2fb36046513b1ba6d3386a4aac35428e80c2179b9ce42892d6e078d35 not found: ID does not exist" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.540321 4948 scope.go:117] "RemoveContainer" containerID="ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba" Nov 22 05:09:04 crc kubenswrapper[4948]: E1122 05:09:04.541055 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba\": container with ID starting with ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba not found: ID does not exist" containerID="ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba" Nov 22 05:09:04 crc kubenswrapper[4948]: I1122 05:09:04.541106 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba"} err="failed to get container status \"ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba\": rpc error: code = NotFound desc = could not find container \"ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba\": container with ID starting with ac7dcfec237d2e3d7d642d272cfbed54d9e61fc6090d7a26b35816e5414343ba not found: ID does not exist" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.189210 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-scheduler-1"] Nov 22 05:09:05 crc kubenswrapper[4948]: E1122 05:09:05.189937 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerName="manila-api-log" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.189960 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerName="manila-api-log" Nov 22 05:09:05 crc kubenswrapper[4948]: E1122 05:09:05.189985 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" containerName="manila-api" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.189997 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" 
containerName="manila-api" Nov 22 05:09:05 crc kubenswrapper[4948]: E1122 05:09:05.190026 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" containerName="manila-api-log" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.190039 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" containerName="manila-api-log" Nov 22 05:09:05 crc kubenswrapper[4948]: E1122 05:09:05.190066 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerName="manila-api" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.190079 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerName="manila-api" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.190286 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" containerName="manila-api" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.190309 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" containerName="manila-api-log" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.190334 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerName="manila-api-log" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.190351 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" containerName="manila-api" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.191438 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.201243 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-1"] Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.393084 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnhnf\" (UniqueName: \"kubernetes.io/projected/80f5609a-13de-49c6-8d3f-204e63efb912-kube-api-access-rnhnf\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.393139 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.393166 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-scripts\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.393375 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80f5609a-13de-49c6-8d3f-204e63efb912-etc-machine-id\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc 
kubenswrapper[4948]: I1122 05:09:05.393491 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data-custom\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.494626 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data-custom\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.494688 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnhnf\" (UniqueName: \"kubernetes.io/projected/80f5609a-13de-49c6-8d3f-204e63efb912-kube-api-access-rnhnf\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.494731 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.494760 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-scripts\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.494878 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80f5609a-13de-49c6-8d3f-204e63efb912-etc-machine-id\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.495152 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80f5609a-13de-49c6-8d3f-204e63efb912-etc-machine-id\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.500989 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data-custom\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.501201 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.503039 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-scripts\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.525706 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnhnf\" (UniqueName: \"kubernetes.io/projected/80f5609a-13de-49c6-8d3f-204e63efb912-kube-api-access-rnhnf\") pod \"manila-scheduler-1\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.789960 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8435ae57-6365-4f3d-b5ea-c5eb7ab50961" path="/var/lib/kubelet/pods/8435ae57-6365-4f3d-b5ea-c5eb7ab50961/volumes" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.790665 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae0bfa4e-c097-4a05-ab4f-2e951377d066" path="/var/lib/kubelet/pods/ae0bfa4e-c097-4a05-ab4f-2e951377d066/volumes" Nov 22 05:09:05 crc kubenswrapper[4948]: I1122 05:09:05.816919 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:06 crc kubenswrapper[4948]: I1122 05:09:06.064578 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-1"] Nov 22 05:09:06 crc kubenswrapper[4948]: I1122 05:09:06.418122 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-1" event={"ID":"80f5609a-13de-49c6-8d3f-204e63efb912","Type":"ContainerStarted","Data":"21c4afc56d994cc5c9e1f968103a8234e561fc1637ae1e7e3f6a07699534f685"} Nov 22 05:09:07 crc kubenswrapper[4948]: I1122 05:09:07.431913 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-1" event={"ID":"80f5609a-13de-49c6-8d3f-204e63efb912","Type":"ContainerStarted","Data":"a4e810d38c554d793c35151ef1d752de9d0e731f4f1c66f7b3e6d96290405a37"} Nov 22 05:09:07 crc kubenswrapper[4948]: I1122 05:09:07.432234 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-1" event={"ID":"80f5609a-13de-49c6-8d3f-204e63efb912","Type":"ContainerStarted","Data":"df262e563217e766d37a1eb610b4ba3a87e5f679fbeb6c5481e9d4c03df0a7e6"} Nov 22 05:09:07 crc kubenswrapper[4948]: I1122 05:09:07.457245 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-scheduler-1" podStartSLOduration=2.457216384 podStartE2EDuration="2.457216384s" podCreationTimestamp="2025-11-22 05:09:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:09:07.456279427 +0000 UTC m=+1350.142290013" watchObservedRunningTime="2025-11-22 05:09:07.457216384 +0000 UTC m=+1350.143226940" Nov 22 05:09:15 crc kubenswrapper[4948]: I1122 05:09:15.818044 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.316552 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.402212 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-scheduler-2"] Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.404607 4948 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.415207 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-2"] Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.530609 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-scripts\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.530677 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data-custom\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.530698 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f95caedb-58f5-4566-b6be-d1067c181d90-etc-machine-id\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.530760 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.530797 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh9wm\" (UniqueName: \"kubernetes.io/projected/f95caedb-58f5-4566-b6be-d1067c181d90-kube-api-access-xh9wm\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.631889 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-scripts\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.631959 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data-custom\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.631982 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f95caedb-58f5-4566-b6be-d1067c181d90-etc-machine-id\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.632012 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.632032 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh9wm\" (UniqueName: \"kubernetes.io/projected/f95caedb-58f5-4566-b6be-d1067c181d90-kube-api-access-xh9wm\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.632423 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f95caedb-58f5-4566-b6be-d1067c181d90-etc-machine-id\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.639572 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.642029 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data-custom\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.645399 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-scripts\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.654624 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh9wm\" (UniqueName: \"kubernetes.io/projected/f95caedb-58f5-4566-b6be-d1067c181d90-kube-api-access-xh9wm\") pod \"manila-scheduler-2\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:27 crc kubenswrapper[4948]: I1122 05:09:27.728536 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:28 crc kubenswrapper[4948]: I1122 05:09:28.144991 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-2"] Nov 22 05:09:28 crc kubenswrapper[4948]: W1122 05:09:28.158667 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf95caedb_58f5_4566_b6be_d1067c181d90.slice/crio-360286d4a3fcf3351201fe04929c315ae801a8cdc60f1c49b8af5ea75ded61d7 WatchSource:0}: Error finding container 360286d4a3fcf3351201fe04929c315ae801a8cdc60f1c49b8af5ea75ded61d7: Status 404 returned error can't find the container with id 360286d4a3fcf3351201fe04929c315ae801a8cdc60f1c49b8af5ea75ded61d7 Nov 22 05:09:28 crc kubenswrapper[4948]: I1122 05:09:28.610978 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-2" event={"ID":"f95caedb-58f5-4566-b6be-d1067c181d90","Type":"ContainerStarted","Data":"ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25"} Nov 22 05:09:28 crc kubenswrapper[4948]: I1122 05:09:28.611258 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-2" event={"ID":"f95caedb-58f5-4566-b6be-d1067c181d90","Type":"ContainerStarted","Data":"360286d4a3fcf3351201fe04929c315ae801a8cdc60f1c49b8af5ea75ded61d7"} Nov 22 05:09:30 crc kubenswrapper[4948]: I1122 05:09:30.644563 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-2" event={"ID":"f95caedb-58f5-4566-b6be-d1067c181d90","Type":"ContainerStarted","Data":"229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c"} Nov 22 05:09:30 crc kubenswrapper[4948]: I1122 05:09:30.683866 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-scheduler-2" podStartSLOduration=3.683837417 podStartE2EDuration="3.683837417s" podCreationTimestamp="2025-11-22 05:09:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:09:30.678184257 +0000 UTC m=+1373.364194813" watchObservedRunningTime="2025-11-22 05:09:30.683837417 +0000 UTC m=+1373.369847973" Nov 22 05:09:37 crc kubenswrapper[4948]: I1122 05:09:37.729370 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:49 crc kubenswrapper[4948]: I1122 05:09:49.131848 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.621612 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-db-sync-pqnwf"] Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.626664 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-db-sync-pqnwf"] Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.675157 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.675452 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-0" podUID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerName="manila-scheduler" containerID="cri-o://f66f84fa7edcad39daec92e0d5390fd479721280760f42e16981eff1011e8372" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 
05:09:50.675531 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-0" podUID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerName="probe" containerID="cri-o://7b5b40df80a167d5dfee079706b15f8ca911922b576902a45ddf9d49f221c373" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.688518 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-2"] Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.689346 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-2" podUID="f95caedb-58f5-4566-b6be-d1067c181d90" containerName="manila-scheduler" containerID="cri-o://ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.689504 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-2" podUID="f95caedb-58f5-4566-b6be-d1067c181d90" containerName="probe" containerID="cri-o://229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.704572 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.704786 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-share-share0-0" podUID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerName="manila-share" containerID="cri-o://5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.704904 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-share-share0-0" podUID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerName="probe" containerID="cri-o://707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.713498 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-1"] Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.713947 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-1" podUID="80f5609a-13de-49c6-8d3f-204e63efb912" containerName="manila-scheduler" containerID="cri-o://a4e810d38c554d793c35151ef1d752de9d0e731f4f1c66f7b3e6d96290405a37" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.714188 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-1" podUID="80f5609a-13de-49c6-8d3f-204e63efb912" containerName="probe" containerID="cri-o://df262e563217e766d37a1eb610b4ba3a87e5f679fbeb6c5481e9d4c03df0a7e6" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.722575 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila3383-account-delete-b22nb"] Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.723853 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila3383-account-delete-b22nb" Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.736302 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.736825 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-0" podUID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerName="manila-api-log" containerID="cri-o://4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.737051 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-0" podUID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerName="manila-api" containerID="cri-o://5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134" gracePeriod=30 Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.741058 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila3383-account-delete-b22nb"] Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.802304 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkqtm\" (UniqueName: \"kubernetes.io/projected/4c0cb4f0-d02c-4aa5-a367-7a1b0a831065-kube-api-access-gkqtm\") pod \"manila3383-account-delete-b22nb\" (UID: \"4c0cb4f0-d02c-4aa5-a367-7a1b0a831065\") " pod="manila-kuttl-tests/manila3383-account-delete-b22nb" Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.903698 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkqtm\" (UniqueName: \"kubernetes.io/projected/4c0cb4f0-d02c-4aa5-a367-7a1b0a831065-kube-api-access-gkqtm\") pod \"manila3383-account-delete-b22nb\" (UID: \"4c0cb4f0-d02c-4aa5-a367-7a1b0a831065\") " pod="manila-kuttl-tests/manila3383-account-delete-b22nb" Nov 22 05:09:50 crc kubenswrapper[4948]: I1122 05:09:50.922756 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkqtm\" (UniqueName: \"kubernetes.io/projected/4c0cb4f0-d02c-4aa5-a367-7a1b0a831065-kube-api-access-gkqtm\") pod \"manila3383-account-delete-b22nb\" (UID: \"4c0cb4f0-d02c-4aa5-a367-7a1b0a831065\") " pod="manila-kuttl-tests/manila3383-account-delete-b22nb" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.049978 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila3383-account-delete-b22nb" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.531255 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila3383-account-delete-b22nb"] Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.539972 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619064 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data-custom\") pod \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619151 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlrrm\" (UniqueName: \"kubernetes.io/projected/e16f33c0-0a4c-48bf-8c2d-6828e2564977-kube-api-access-hlrrm\") pod \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619201 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-var-lib-manila\") pod \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619225 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data\") pod \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619254 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-ceph\") pod \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619293 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-etc-machine-id\") pod \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619329 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "e16f33c0-0a4c-48bf-8c2d-6828e2564977" (UID: "e16f33c0-0a4c-48bf-8c2d-6828e2564977"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619350 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-scripts\") pod \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\" (UID: \"e16f33c0-0a4c-48bf-8c2d-6828e2564977\") " Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619397 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "e16f33c0-0a4c-48bf-8c2d-6828e2564977" (UID: "e16f33c0-0a4c-48bf-8c2d-6828e2564977"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.619979 4948 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.620003 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e16f33c0-0a4c-48bf-8c2d-6828e2564977-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.625534 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e16f33c0-0a4c-48bf-8c2d-6828e2564977" (UID: "e16f33c0-0a4c-48bf-8c2d-6828e2564977"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.627612 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-scripts" (OuterVolumeSpecName: "scripts") pod "e16f33c0-0a4c-48bf-8c2d-6828e2564977" (UID: "e16f33c0-0a4c-48bf-8c2d-6828e2564977"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.627759 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e16f33c0-0a4c-48bf-8c2d-6828e2564977-kube-api-access-hlrrm" (OuterVolumeSpecName: "kube-api-access-hlrrm") pod "e16f33c0-0a4c-48bf-8c2d-6828e2564977" (UID: "e16f33c0-0a4c-48bf-8c2d-6828e2564977"). InnerVolumeSpecName "kube-api-access-hlrrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.629078 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-ceph" (OuterVolumeSpecName: "ceph") pod "e16f33c0-0a4c-48bf-8c2d-6828e2564977" (UID: "e16f33c0-0a4c-48bf-8c2d-6828e2564977"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.718609 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data" (OuterVolumeSpecName: "config-data") pod "e16f33c0-0a4c-48bf-8c2d-6828e2564977" (UID: "e16f33c0-0a4c-48bf-8c2d-6828e2564977"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.723305 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.723339 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.723352 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlrrm\" (UniqueName: \"kubernetes.io/projected/e16f33c0-0a4c-48bf-8c2d-6828e2564977-kube-api-access-hlrrm\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.723361 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.723372 4948 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e16f33c0-0a4c-48bf-8c2d-6828e2564977-ceph\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.769587 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55019746-79bf-40ed-8fdd-a34adc3d8c02" path="/var/lib/kubelet/pods/55019746-79bf-40ed-8fdd-a34adc3d8c02/volumes" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.854823 4948 generic.go:334] "Generic (PLEG): container finished" podID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerID="707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa" exitCode=0 Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.855096 4948 generic.go:334] "Generic (PLEG): container finished" podID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerID="5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b" exitCode=1 Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.854928 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.854904 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"e16f33c0-0a4c-48bf-8c2d-6828e2564977","Type":"ContainerDied","Data":"707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa"} Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.855440 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"e16f33c0-0a4c-48bf-8c2d-6828e2564977","Type":"ContainerDied","Data":"5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b"} Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.855480 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"e16f33c0-0a4c-48bf-8c2d-6828e2564977","Type":"ContainerDied","Data":"e953fdfe9dd5aa1a97693c266c020a09a78e77f9f14e396b98667daea4d267db"} Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.855505 4948 scope.go:117] "RemoveContainer" containerID="707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.859144 4948 generic.go:334] "Generic (PLEG): container finished" podID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerID="4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62" exitCode=143 Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.859251 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"0db17102-ad8c-40d5-8ff3-68eed833b9e1","Type":"ContainerDied","Data":"4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62"} Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.873987 4948 generic.go:334] "Generic (PLEG): container finished" podID="f95caedb-58f5-4566-b6be-d1067c181d90" containerID="229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c" exitCode=0 Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.874089 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-2" event={"ID":"f95caedb-58f5-4566-b6be-d1067c181d90","Type":"ContainerDied","Data":"229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c"} Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.875978 4948 generic.go:334] "Generic (PLEG): container finished" podID="80f5609a-13de-49c6-8d3f-204e63efb912" containerID="df262e563217e766d37a1eb610b4ba3a87e5f679fbeb6c5481e9d4c03df0a7e6" exitCode=0 Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.876000 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-1" event={"ID":"80f5609a-13de-49c6-8d3f-204e63efb912","Type":"ContainerDied","Data":"df262e563217e766d37a1eb610b4ba3a87e5f679fbeb6c5481e9d4c03df0a7e6"} Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.877640 4948 generic.go:334] "Generic (PLEG): container finished" podID="4c0cb4f0-d02c-4aa5-a367-7a1b0a831065" containerID="e4ba0d6775262ab1ad86a567fd6ba466208ea867415590e5157346417846b883" exitCode=0 Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.877669 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila3383-account-delete-b22nb" event={"ID":"4c0cb4f0-d02c-4aa5-a367-7a1b0a831065","Type":"ContainerDied","Data":"e4ba0d6775262ab1ad86a567fd6ba466208ea867415590e5157346417846b883"} Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.877692 4948 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila3383-account-delete-b22nb" event={"ID":"4c0cb4f0-d02c-4aa5-a367-7a1b0a831065","Type":"ContainerStarted","Data":"d3366f1715dd84800264c739f5d725291d69199d41e2cfef14b0fa2e97ee269a"} Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.879249 4948 generic.go:334] "Generic (PLEG): container finished" podID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerID="7b5b40df80a167d5dfee079706b15f8ca911922b576902a45ddf9d49f221c373" exitCode=0 Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.879278 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"506bf3aa-7e96-48b6-94dd-a3b1346cb464","Type":"ContainerDied","Data":"7b5b40df80a167d5dfee079706b15f8ca911922b576902a45ddf9d49f221c373"} Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.915305 4948 scope.go:117] "RemoveContainer" containerID="5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.921317 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.928618 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.934155 4948 scope.go:117] "RemoveContainer" containerID="707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa" Nov 22 05:09:51 crc kubenswrapper[4948]: E1122 05:09:51.934601 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa\": container with ID starting with 707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa not found: ID does not exist" containerID="707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.934635 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa"} err="failed to get container status \"707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa\": rpc error: code = NotFound desc = could not find container \"707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa\": container with ID starting with 707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa not found: ID does not exist" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.934663 4948 scope.go:117] "RemoveContainer" containerID="5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b" Nov 22 05:09:51 crc kubenswrapper[4948]: E1122 05:09:51.935827 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b\": container with ID starting with 5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b not found: ID does not exist" containerID="5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.935855 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b"} err="failed to get container status \"5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b\": rpc error: code 
= NotFound desc = could not find container \"5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b\": container with ID starting with 5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b not found: ID does not exist" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.935869 4948 scope.go:117] "RemoveContainer" containerID="707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.936178 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa"} err="failed to get container status \"707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa\": rpc error: code = NotFound desc = could not find container \"707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa\": container with ID starting with 707045ab622272018445c75bb393b179dd23214337c91cebe1021feebaf6e7fa not found: ID does not exist" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.936214 4948 scope.go:117] "RemoveContainer" containerID="5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b" Nov 22 05:09:51 crc kubenswrapper[4948]: I1122 05:09:51.936544 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b"} err="failed to get container status \"5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b\": rpc error: code = NotFound desc = could not find container \"5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b\": container with ID starting with 5bd8d303c6144e5cdec785ba855a777cc38912e93775b56100c6f192b08cd60b not found: ID does not exist" Nov 22 05:09:52 crc kubenswrapper[4948]: I1122 05:09:52.887998 4948 generic.go:334] "Generic (PLEG): container finished" podID="80f5609a-13de-49c6-8d3f-204e63efb912" containerID="a4e810d38c554d793c35151ef1d752de9d0e731f4f1c66f7b3e6d96290405a37" exitCode=0 Nov 22 05:09:52 crc kubenswrapper[4948]: I1122 05:09:52.888170 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-1" event={"ID":"80f5609a-13de-49c6-8d3f-204e63efb912","Type":"ContainerDied","Data":"a4e810d38c554d793c35151ef1d752de9d0e731f4f1c66f7b3e6d96290405a37"} Nov 22 05:09:52 crc kubenswrapper[4948]: I1122 05:09:52.889488 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-1" event={"ID":"80f5609a-13de-49c6-8d3f-204e63efb912","Type":"ContainerDied","Data":"21c4afc56d994cc5c9e1f968103a8234e561fc1637ae1e7e3f6a07699534f685"} Nov 22 05:09:52 crc kubenswrapper[4948]: I1122 05:09:52.889616 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21c4afc56d994cc5c9e1f968103a8234e561fc1637ae1e7e3f6a07699534f685" Nov 22 05:09:52 crc kubenswrapper[4948]: I1122 05:09:52.908517 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.041899 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80f5609a-13de-49c6-8d3f-204e63efb912-etc-machine-id\") pod \"80f5609a-13de-49c6-8d3f-204e63efb912\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.041980 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnhnf\" (UniqueName: \"kubernetes.io/projected/80f5609a-13de-49c6-8d3f-204e63efb912-kube-api-access-rnhnf\") pod \"80f5609a-13de-49c6-8d3f-204e63efb912\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.041997 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80f5609a-13de-49c6-8d3f-204e63efb912-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "80f5609a-13de-49c6-8d3f-204e63efb912" (UID: "80f5609a-13de-49c6-8d3f-204e63efb912"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.042041 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-scripts\") pod \"80f5609a-13de-49c6-8d3f-204e63efb912\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.042064 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data\") pod \"80f5609a-13de-49c6-8d3f-204e63efb912\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.042133 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data-custom\") pod \"80f5609a-13de-49c6-8d3f-204e63efb912\" (UID: \"80f5609a-13de-49c6-8d3f-204e63efb912\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.042387 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80f5609a-13de-49c6-8d3f-204e63efb912-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.048171 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80f5609a-13de-49c6-8d3f-204e63efb912-kube-api-access-rnhnf" (OuterVolumeSpecName: "kube-api-access-rnhnf") pod "80f5609a-13de-49c6-8d3f-204e63efb912" (UID: "80f5609a-13de-49c6-8d3f-204e63efb912"). InnerVolumeSpecName "kube-api-access-rnhnf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.049608 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "80f5609a-13de-49c6-8d3f-204e63efb912" (UID: "80f5609a-13de-49c6-8d3f-204e63efb912"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.049922 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-scripts" (OuterVolumeSpecName: "scripts") pod "80f5609a-13de-49c6-8d3f-204e63efb912" (UID: "80f5609a-13de-49c6-8d3f-204e63efb912"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.115763 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data" (OuterVolumeSpecName: "config-data") pod "80f5609a-13de-49c6-8d3f-204e63efb912" (UID: "80f5609a-13de-49c6-8d3f-204e63efb912"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.143925 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.143965 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.143984 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnhnf\" (UniqueName: \"kubernetes.io/projected/80f5609a-13de-49c6-8d3f-204e63efb912-kube-api-access-rnhnf\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.143995 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80f5609a-13de-49c6-8d3f-204e63efb912-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.146872 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila3383-account-delete-b22nb" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.244841 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkqtm\" (UniqueName: \"kubernetes.io/projected/4c0cb4f0-d02c-4aa5-a367-7a1b0a831065-kube-api-access-gkqtm\") pod \"4c0cb4f0-d02c-4aa5-a367-7a1b0a831065\" (UID: \"4c0cb4f0-d02c-4aa5-a367-7a1b0a831065\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.248430 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c0cb4f0-d02c-4aa5-a367-7a1b0a831065-kube-api-access-gkqtm" (OuterVolumeSpecName: "kube-api-access-gkqtm") pod "4c0cb4f0-d02c-4aa5-a367-7a1b0a831065" (UID: "4c0cb4f0-d02c-4aa5-a367-7a1b0a831065"). InnerVolumeSpecName "kube-api-access-gkqtm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.347076 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkqtm\" (UniqueName: \"kubernetes.io/projected/4c0cb4f0-d02c-4aa5-a367-7a1b0a831065-kube-api-access-gkqtm\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.775583 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" path="/var/lib/kubelet/pods/e16f33c0-0a4c-48bf-8c2d-6828e2564977/volumes" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.869831 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.908246 4948 generic.go:334] "Generic (PLEG): container finished" podID="f95caedb-58f5-4566-b6be-d1067c181d90" containerID="ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25" exitCode=0 Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.908286 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-2" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.908326 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-2" event={"ID":"f95caedb-58f5-4566-b6be-d1067c181d90","Type":"ContainerDied","Data":"ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25"} Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.908364 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-2" event={"ID":"f95caedb-58f5-4566-b6be-d1067c181d90","Type":"ContainerDied","Data":"360286d4a3fcf3351201fe04929c315ae801a8cdc60f1c49b8af5ea75ded61d7"} Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.908382 4948 scope.go:117] "RemoveContainer" containerID="229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.910021 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila3383-account-delete-b22nb" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.910028 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila3383-account-delete-b22nb" event={"ID":"4c0cb4f0-d02c-4aa5-a367-7a1b0a831065","Type":"ContainerDied","Data":"d3366f1715dd84800264c739f5d725291d69199d41e2cfef14b0fa2e97ee269a"} Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.910063 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3366f1715dd84800264c739f5d725291d69199d41e2cfef14b0fa2e97ee269a" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.913586 4948 generic.go:334] "Generic (PLEG): container finished" podID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerID="f66f84fa7edcad39daec92e0d5390fd479721280760f42e16981eff1011e8372" exitCode=0 Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.913668 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"506bf3aa-7e96-48b6-94dd-a3b1346cb464","Type":"ContainerDied","Data":"f66f84fa7edcad39daec92e0d5390fd479721280760f42e16981eff1011e8372"} Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.913715 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-1" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.937418 4948 scope.go:117] "RemoveContainer" containerID="ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.945283 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-1"] Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.950522 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-scheduler-1"] Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.955011 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f95caedb-58f5-4566-b6be-d1067c181d90-etc-machine-id\") pod \"f95caedb-58f5-4566-b6be-d1067c181d90\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.955055 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data-custom\") pod \"f95caedb-58f5-4566-b6be-d1067c181d90\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.955076 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f95caedb-58f5-4566-b6be-d1067c181d90-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f95caedb-58f5-4566-b6be-d1067c181d90" (UID: "f95caedb-58f5-4566-b6be-d1067c181d90"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.955133 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xh9wm\" (UniqueName: \"kubernetes.io/projected/f95caedb-58f5-4566-b6be-d1067c181d90-kube-api-access-xh9wm\") pod \"f95caedb-58f5-4566-b6be-d1067c181d90\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.955173 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data\") pod \"f95caedb-58f5-4566-b6be-d1067c181d90\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.955226 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-scripts\") pod \"f95caedb-58f5-4566-b6be-d1067c181d90\" (UID: \"f95caedb-58f5-4566-b6be-d1067c181d90\") " Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.955626 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f95caedb-58f5-4566-b6be-d1067c181d90-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.958848 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f95caedb-58f5-4566-b6be-d1067c181d90-kube-api-access-xh9wm" (OuterVolumeSpecName: "kube-api-access-xh9wm") pod "f95caedb-58f5-4566-b6be-d1067c181d90" (UID: "f95caedb-58f5-4566-b6be-d1067c181d90"). InnerVolumeSpecName "kube-api-access-xh9wm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.958926 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-scripts" (OuterVolumeSpecName: "scripts") pod "f95caedb-58f5-4566-b6be-d1067c181d90" (UID: "f95caedb-58f5-4566-b6be-d1067c181d90"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:53 crc kubenswrapper[4948]: I1122 05:09:53.958989 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f95caedb-58f5-4566-b6be-d1067c181d90" (UID: "f95caedb-58f5-4566-b6be-d1067c181d90"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.033002 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data" (OuterVolumeSpecName: "config-data") pod "f95caedb-58f5-4566-b6be-d1067c181d90" (UID: "f95caedb-58f5-4566-b6be-d1067c181d90"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.059260 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.059305 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xh9wm\" (UniqueName: \"kubernetes.io/projected/f95caedb-58f5-4566-b6be-d1067c181d90-kube-api-access-xh9wm\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.059323 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.059337 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f95caedb-58f5-4566-b6be-d1067c181d90-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.079604 4948 scope.go:117] "RemoveContainer" containerID="229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c" Nov 22 05:09:54 crc kubenswrapper[4948]: E1122 05:09:54.080001 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c\": container with ID starting with 229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c not found: ID does not exist" containerID="229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.080039 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c"} err="failed to get container status \"229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c\": rpc error: code = NotFound desc = could not find container \"229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c\": container with ID starting with 
229d844538164fd4957588d1e81fc095a5c4213e86e84bb4ffedb9393dd57f8c not found: ID does not exist" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.080066 4948 scope.go:117] "RemoveContainer" containerID="ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25" Nov 22 05:09:54 crc kubenswrapper[4948]: E1122 05:09:54.080395 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25\": container with ID starting with ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25 not found: ID does not exist" containerID="ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.080451 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25"} err="failed to get container status \"ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25\": rpc error: code = NotFound desc = could not find container \"ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25\": container with ID starting with ebb99a65b4030a981ddb2ca4e7b00dc7a7a6b364ac9d0db1fe97764d9a4ead25 not found: ID does not exist" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.083807 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.160934 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-scripts\") pod \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.160988 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/506bf3aa-7e96-48b6-94dd-a3b1346cb464-etc-machine-id\") pod \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.161017 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfnbp\" (UniqueName: \"kubernetes.io/projected/506bf3aa-7e96-48b6-94dd-a3b1346cb464-kube-api-access-jfnbp\") pod \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.161048 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data-custom\") pod \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.161151 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data\") pod \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\" (UID: \"506bf3aa-7e96-48b6-94dd-a3b1346cb464\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.161458 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/506bf3aa-7e96-48b6-94dd-a3b1346cb464-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod 
"506bf3aa-7e96-48b6-94dd-a3b1346cb464" (UID: "506bf3aa-7e96-48b6-94dd-a3b1346cb464"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.164672 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-scripts" (OuterVolumeSpecName: "scripts") pod "506bf3aa-7e96-48b6-94dd-a3b1346cb464" (UID: "506bf3aa-7e96-48b6-94dd-a3b1346cb464"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.168113 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "506bf3aa-7e96-48b6-94dd-a3b1346cb464" (UID: "506bf3aa-7e96-48b6-94dd-a3b1346cb464"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.174554 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/506bf3aa-7e96-48b6-94dd-a3b1346cb464-kube-api-access-jfnbp" (OuterVolumeSpecName: "kube-api-access-jfnbp") pod "506bf3aa-7e96-48b6-94dd-a3b1346cb464" (UID: "506bf3aa-7e96-48b6-94dd-a3b1346cb464"). InnerVolumeSpecName "kube-api-access-jfnbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.200158 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.241918 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-2"] Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.247929 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-scheduler-2"] Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.254574 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data" (OuterVolumeSpecName: "config-data") pod "506bf3aa-7e96-48b6-94dd-a3b1346cb464" (UID: "506bf3aa-7e96-48b6-94dd-a3b1346cb464"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.262185 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0db17102-ad8c-40d5-8ff3-68eed833b9e1-etc-machine-id\") pod \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.262249 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0db17102-ad8c-40d5-8ff3-68eed833b9e1-logs\") pod \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.262295 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psmp5\" (UniqueName: \"kubernetes.io/projected/0db17102-ad8c-40d5-8ff3-68eed833b9e1-kube-api-access-psmp5\") pod \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.262348 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data\") pod \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.262518 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-scripts\") pod \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.262586 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data-custom\") pod \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\" (UID: \"0db17102-ad8c-40d5-8ff3-68eed833b9e1\") " Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.262785 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0db17102-ad8c-40d5-8ff3-68eed833b9e1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0db17102-ad8c-40d5-8ff3-68eed833b9e1" (UID: "0db17102-ad8c-40d5-8ff3-68eed833b9e1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.262862 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0db17102-ad8c-40d5-8ff3-68eed833b9e1-logs" (OuterVolumeSpecName: "logs") pod "0db17102-ad8c-40d5-8ff3-68eed833b9e1" (UID: "0db17102-ad8c-40d5-8ff3-68eed833b9e1"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.262985 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.263011 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.263028 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/506bf3aa-7e96-48b6-94dd-a3b1346cb464-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.263044 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfnbp\" (UniqueName: \"kubernetes.io/projected/506bf3aa-7e96-48b6-94dd-a3b1346cb464-kube-api-access-jfnbp\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.263060 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/506bf3aa-7e96-48b6-94dd-a3b1346cb464-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.263076 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0db17102-ad8c-40d5-8ff3-68eed833b9e1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.265803 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0db17102-ad8c-40d5-8ff3-68eed833b9e1" (UID: "0db17102-ad8c-40d5-8ff3-68eed833b9e1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.266431 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-scripts" (OuterVolumeSpecName: "scripts") pod "0db17102-ad8c-40d5-8ff3-68eed833b9e1" (UID: "0db17102-ad8c-40d5-8ff3-68eed833b9e1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.267580 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0db17102-ad8c-40d5-8ff3-68eed833b9e1-kube-api-access-psmp5" (OuterVolumeSpecName: "kube-api-access-psmp5") pod "0db17102-ad8c-40d5-8ff3-68eed833b9e1" (UID: "0db17102-ad8c-40d5-8ff3-68eed833b9e1"). InnerVolumeSpecName "kube-api-access-psmp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.301169 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data" (OuterVolumeSpecName: "config-data") pod "0db17102-ad8c-40d5-8ff3-68eed833b9e1" (UID: "0db17102-ad8c-40d5-8ff3-68eed833b9e1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.364361 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.364517 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.364534 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0db17102-ad8c-40d5-8ff3-68eed833b9e1-logs\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.364546 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psmp5\" (UniqueName: \"kubernetes.io/projected/0db17102-ad8c-40d5-8ff3-68eed833b9e1-kube-api-access-psmp5\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.364557 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0db17102-ad8c-40d5-8ff3-68eed833b9e1-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.924841 4948 generic.go:334] "Generic (PLEG): container finished" podID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerID="5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134" exitCode=0 Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.925055 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"0db17102-ad8c-40d5-8ff3-68eed833b9e1","Type":"ContainerDied","Data":"5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134"} Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.925119 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.925159 4948 scope.go:117] "RemoveContainer" containerID="5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.925140 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"0db17102-ad8c-40d5-8ff3-68eed833b9e1","Type":"ContainerDied","Data":"88d18faafbd9ca01ccd5fa3bcccc11f81cf9a517d1dd26bbf01e77a58fb48e5b"} Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.930084 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"506bf3aa-7e96-48b6-94dd-a3b1346cb464","Type":"ContainerDied","Data":"0dddafcbab0c496104b537a561af6045447657c5aaabfed018ef5d80932dce98"} Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.930185 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.964093 4948 scope.go:117] "RemoveContainer" containerID="4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.967226 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.975608 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.981952 4948 scope.go:117] "RemoveContainer" containerID="5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.981961 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:09:54 crc kubenswrapper[4948]: E1122 05:09:54.982395 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134\": container with ID starting with 5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134 not found: ID does not exist" containerID="5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.982422 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134"} err="failed to get container status \"5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134\": rpc error: code = NotFound desc = could not find container \"5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134\": container with ID starting with 5cb3db8f9f2bc5925845fad05c44c00d69852a82ae0ca7e0bd9ff77c9ccf1134 not found: ID does not exist" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.982444 4948 scope.go:117] "RemoveContainer" containerID="4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62" Nov 22 05:09:54 crc kubenswrapper[4948]: E1122 05:09:54.982772 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62\": container with ID starting with 4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62 not found: ID does not exist" containerID="4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.982811 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62"} err="failed to get container status \"4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62\": rpc error: code = NotFound desc = could not find container \"4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62\": container with ID starting with 4f6b043541e9a8a8fb37d51016e1155d94f9dcf7b293751551d6ae1b7bfeba62 not found: ID does not exist" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.982839 4948 scope.go:117] "RemoveContainer" containerID="7b5b40df80a167d5dfee079706b15f8ca911922b576902a45ddf9d49f221c373" Nov 22 05:09:54 crc kubenswrapper[4948]: I1122 05:09:54.993516 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 
05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.000318 4948 scope.go:117] "RemoveContainer" containerID="f66f84fa7edcad39daec92e0d5390fd479721280760f42e16981eff1011e8372" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.715833 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-db-create-qhh2s"] Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.725808 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-db-create-qhh2s"] Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.734197 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila3383-account-delete-b22nb"] Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.740101 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-3383-account-create-m88tv"] Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.748661 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-3383-account-create-m88tv"] Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.753266 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila3383-account-delete-b22nb"] Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.774428 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06c3bb7d-c3ce-42e2-a6c2-305893a6d59e" path="/var/lib/kubelet/pods/06c3bb7d-c3ce-42e2-a6c2-305893a6d59e/volumes" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.776186 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" path="/var/lib/kubelet/pods/0db17102-ad8c-40d5-8ff3-68eed833b9e1/volumes" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.777590 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c0cb4f0-d02c-4aa5-a367-7a1b0a831065" path="/var/lib/kubelet/pods/4c0cb4f0-d02c-4aa5-a367-7a1b0a831065/volumes" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.780177 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" path="/var/lib/kubelet/pods/506bf3aa-7e96-48b6-94dd-a3b1346cb464/volumes" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.781322 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80f5609a-13de-49c6-8d3f-204e63efb912" path="/var/lib/kubelet/pods/80f5609a-13de-49c6-8d3f-204e63efb912/volumes" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.782718 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abbc22fc-4f38-45dd-bc95-f7e5fc647919" path="/var/lib/kubelet/pods/abbc22fc-4f38-45dd-bc95-f7e5fc647919/volumes" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.783711 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f95caedb-58f5-4566-b6be-d1067c181d90" path="/var/lib/kubelet/pods/f95caedb-58f5-4566-b6be-d1067c181d90/volumes" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.897197 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-db-create-cz6pn"] Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.897882 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c0cb4f0-d02c-4aa5-a367-7a1b0a831065" containerName="mariadb-account-delete" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.897907 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c0cb4f0-d02c-4aa5-a367-7a1b0a831065" containerName="mariadb-account-delete" Nov 22 05:09:55 
crc kubenswrapper[4948]: E1122 05:09:55.897934 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80f5609a-13de-49c6-8d3f-204e63efb912" containerName="manila-scheduler" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.897944 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="80f5609a-13de-49c6-8d3f-204e63efb912" containerName="manila-scheduler" Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.897967 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.897977 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.897989 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898000 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.898009 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95caedb-58f5-4566-b6be-d1067c181d90" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898040 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95caedb-58f5-4566-b6be-d1067c181d90" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.898061 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerName="manila-api-log" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898070 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerName="manila-api-log" Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.898091 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80f5609a-13de-49c6-8d3f-204e63efb912" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898099 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="80f5609a-13de-49c6-8d3f-204e63efb912" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.898115 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerName="manila-share" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898124 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerName="manila-share" Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.898140 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerName="manila-api" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898150 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerName="manila-api" Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.898167 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95caedb-58f5-4566-b6be-d1067c181d90" containerName="manila-scheduler" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898177 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95caedb-58f5-4566-b6be-d1067c181d90" containerName="manila-scheduler" Nov 22 05:09:55 crc kubenswrapper[4948]: E1122 05:09:55.898192 4948 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerName="manila-scheduler" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898201 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerName="manila-scheduler" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898389 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerName="manila-share" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898407 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerName="manila-api" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898420 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="80f5609a-13de-49c6-8d3f-204e63efb912" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898436 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898448 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c0cb4f0-d02c-4aa5-a367-7a1b0a831065" containerName="mariadb-account-delete" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898498 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="80f5609a-13de-49c6-8d3f-204e63efb912" containerName="manila-scheduler" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898514 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0db17102-ad8c-40d5-8ff3-68eed833b9e1" containerName="manila-api-log" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898527 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f95caedb-58f5-4566-b6be-d1067c181d90" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898543 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="506bf3aa-7e96-48b6-94dd-a3b1346cb464" containerName="manila-scheduler" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898560 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f95caedb-58f5-4566-b6be-d1067c181d90" containerName="manila-scheduler" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.898577 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="e16f33c0-0a4c-48bf-8c2d-6828e2564977" containerName="probe" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.899205 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-db-create-cz6pn" Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.908276 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-create-cz6pn"] Nov 22 05:09:55 crc kubenswrapper[4948]: I1122 05:09:55.994804 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4b642\" (UniqueName: \"kubernetes.io/projected/99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1-kube-api-access-4b642\") pod \"manila-db-create-cz6pn\" (UID: \"99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1\") " pod="manila-kuttl-tests/manila-db-create-cz6pn" Nov 22 05:09:56 crc kubenswrapper[4948]: I1122 05:09:56.096717 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4b642\" (UniqueName: \"kubernetes.io/projected/99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1-kube-api-access-4b642\") pod \"manila-db-create-cz6pn\" (UID: \"99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1\") " pod="manila-kuttl-tests/manila-db-create-cz6pn" Nov 22 05:09:56 crc kubenswrapper[4948]: I1122 05:09:56.127652 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4b642\" (UniqueName: \"kubernetes.io/projected/99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1-kube-api-access-4b642\") pod \"manila-db-create-cz6pn\" (UID: \"99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1\") " pod="manila-kuttl-tests/manila-db-create-cz6pn" Nov 22 05:09:56 crc kubenswrapper[4948]: I1122 05:09:56.229452 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-create-cz6pn" Nov 22 05:09:56 crc kubenswrapper[4948]: I1122 05:09:56.747642 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-create-cz6pn"] Nov 22 05:09:56 crc kubenswrapper[4948]: I1122 05:09:56.951900 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-cz6pn" event={"ID":"99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1","Type":"ContainerStarted","Data":"f8b69cf32befe7afd8cdeee0fd5606fd35ae945b0ae1ab07971c30cde9a0ae64"} Nov 22 05:09:56 crc kubenswrapper[4948]: I1122 05:09:56.951967 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-cz6pn" event={"ID":"99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1","Type":"ContainerStarted","Data":"1845758bca4e07d206c8e7384dbde53d5a9095b222a981ad1c841494c7904e32"} Nov 22 05:09:57 crc kubenswrapper[4948]: I1122 05:09:57.966530 4948 generic.go:334] "Generic (PLEG): container finished" podID="99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1" containerID="f8b69cf32befe7afd8cdeee0fd5606fd35ae945b0ae1ab07971c30cde9a0ae64" exitCode=0 Nov 22 05:09:57 crc kubenswrapper[4948]: I1122 05:09:57.966596 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-cz6pn" event={"ID":"99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1","Type":"ContainerDied","Data":"f8b69cf32befe7afd8cdeee0fd5606fd35ae945b0ae1ab07971c30cde9a0ae64"} Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.672520 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b4n5m"] Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.674058 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.689099 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b4n5m"] Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.742938 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-utilities\") pod \"redhat-marketplace-b4n5m\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.743204 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6j7kr\" (UniqueName: \"kubernetes.io/projected/7aa12c71-a181-46a9-84d8-51dddf3c0c73-kube-api-access-6j7kr\") pod \"redhat-marketplace-b4n5m\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.743354 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-catalog-content\") pod \"redhat-marketplace-b4n5m\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.844635 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-utilities\") pod \"redhat-marketplace-b4n5m\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.844724 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6j7kr\" (UniqueName: \"kubernetes.io/projected/7aa12c71-a181-46a9-84d8-51dddf3c0c73-kube-api-access-6j7kr\") pod \"redhat-marketplace-b4n5m\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.844744 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-catalog-content\") pod \"redhat-marketplace-b4n5m\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.845615 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-utilities\") pod \"redhat-marketplace-b4n5m\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.845925 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-catalog-content\") pod \"redhat-marketplace-b4n5m\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.866989 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6j7kr\" (UniqueName: \"kubernetes.io/projected/7aa12c71-a181-46a9-84d8-51dddf3c0c73-kube-api-access-6j7kr\") pod \"redhat-marketplace-b4n5m\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:58 crc kubenswrapper[4948]: I1122 05:09:58.990513 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.246343 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b4n5m"] Nov 22 05:09:59 crc kubenswrapper[4948]: W1122 05:09:59.246568 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7aa12c71_a181_46a9_84d8_51dddf3c0c73.slice/crio-bbc0cc7d0ba4bde3444223608781b54430a04209260ff70702beb350c3b17328 WatchSource:0}: Error finding container bbc0cc7d0ba4bde3444223608781b54430a04209260ff70702beb350c3b17328: Status 404 returned error can't find the container with id bbc0cc7d0ba4bde3444223608781b54430a04209260ff70702beb350c3b17328 Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.259269 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-create-cz6pn" Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.352247 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4b642\" (UniqueName: \"kubernetes.io/projected/99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1-kube-api-access-4b642\") pod \"99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1\" (UID: \"99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1\") " Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.357397 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1-kube-api-access-4b642" (OuterVolumeSpecName: "kube-api-access-4b642") pod "99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1" (UID: "99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1"). InnerVolumeSpecName "kube-api-access-4b642". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.454059 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4b642\" (UniqueName: \"kubernetes.io/projected/99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1-kube-api-access-4b642\") on node \"crc\" DevicePath \"\"" Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.985874 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-cz6pn" event={"ID":"99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1","Type":"ContainerDied","Data":"1845758bca4e07d206c8e7384dbde53d5a9095b222a981ad1c841494c7904e32"} Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.985905 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-db-create-cz6pn" Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.985913 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1845758bca4e07d206c8e7384dbde53d5a9095b222a981ad1c841494c7904e32" Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.987775 4948 generic.go:334] "Generic (PLEG): container finished" podID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerID="adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de" exitCode=0 Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.987801 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b4n5m" event={"ID":"7aa12c71-a181-46a9-84d8-51dddf3c0c73","Type":"ContainerDied","Data":"adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de"} Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.987819 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b4n5m" event={"ID":"7aa12c71-a181-46a9-84d8-51dddf3c0c73","Type":"ContainerStarted","Data":"bbc0cc7d0ba4bde3444223608781b54430a04209260ff70702beb350c3b17328"} Nov 22 05:09:59 crc kubenswrapper[4948]: I1122 05:09:59.990822 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 05:10:00 crc kubenswrapper[4948]: I1122 05:10:00.999225 4948 generic.go:334] "Generic (PLEG): container finished" podID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerID="c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014" exitCode=0 Nov 22 05:10:00 crc kubenswrapper[4948]: I1122 05:10:00.999295 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b4n5m" event={"ID":"7aa12c71-a181-46a9-84d8-51dddf3c0c73","Type":"ContainerDied","Data":"c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014"} Nov 22 05:10:02 crc kubenswrapper[4948]: I1122 05:10:02.012120 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b4n5m" event={"ID":"7aa12c71-a181-46a9-84d8-51dddf3c0c73","Type":"ContainerStarted","Data":"c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4"} Nov 22 05:10:02 crc kubenswrapper[4948]: I1122 05:10:02.042013 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b4n5m" podStartSLOduration=2.610940779 podStartE2EDuration="4.041984294s" podCreationTimestamp="2025-11-22 05:09:58 +0000 UTC" firstStartedPulling="2025-11-22 05:09:59.99060664 +0000 UTC m=+1402.676617156" lastFinishedPulling="2025-11-22 05:10:01.421650125 +0000 UTC m=+1404.107660671" observedRunningTime="2025-11-22 05:10:02.037711212 +0000 UTC m=+1404.723721818" watchObservedRunningTime="2025-11-22 05:10:02.041984294 +0000 UTC m=+1404.727994860" Nov 22 05:10:05 crc kubenswrapper[4948]: I1122 05:10:05.944907 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-1218-account-create-wdsdf"] Nov 22 05:10:05 crc kubenswrapper[4948]: E1122 05:10:05.945668 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1" containerName="mariadb-database-create" Nov 22 05:10:05 crc kubenswrapper[4948]: I1122 05:10:05.945711 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1" containerName="mariadb-database-create" Nov 22 05:10:05 crc kubenswrapper[4948]: I1122 05:10:05.945922 4948 
memory_manager.go:354] "RemoveStaleState removing state" podUID="99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1" containerName="mariadb-database-create" Nov 22 05:10:05 crc kubenswrapper[4948]: I1122 05:10:05.946765 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" Nov 22 05:10:05 crc kubenswrapper[4948]: I1122 05:10:05.951012 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-db-secret" Nov 22 05:10:05 crc kubenswrapper[4948]: I1122 05:10:05.960503 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-1218-account-create-wdsdf"] Nov 22 05:10:06 crc kubenswrapper[4948]: I1122 05:10:06.049937 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4v57\" (UniqueName: \"kubernetes.io/projected/82f9bffa-3363-4655-9e9f-25120440235d-kube-api-access-l4v57\") pod \"manila-1218-account-create-wdsdf\" (UID: \"82f9bffa-3363-4655-9e9f-25120440235d\") " pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" Nov 22 05:10:06 crc kubenswrapper[4948]: I1122 05:10:06.153358 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4v57\" (UniqueName: \"kubernetes.io/projected/82f9bffa-3363-4655-9e9f-25120440235d-kube-api-access-l4v57\") pod \"manila-1218-account-create-wdsdf\" (UID: \"82f9bffa-3363-4655-9e9f-25120440235d\") " pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" Nov 22 05:10:06 crc kubenswrapper[4948]: I1122 05:10:06.194721 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4v57\" (UniqueName: \"kubernetes.io/projected/82f9bffa-3363-4655-9e9f-25120440235d-kube-api-access-l4v57\") pod \"manila-1218-account-create-wdsdf\" (UID: \"82f9bffa-3363-4655-9e9f-25120440235d\") " pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" Nov 22 05:10:06 crc kubenswrapper[4948]: I1122 05:10:06.278946 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" Nov 22 05:10:06 crc kubenswrapper[4948]: I1122 05:10:06.798830 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-1218-account-create-wdsdf"] Nov 22 05:10:07 crc kubenswrapper[4948]: I1122 05:10:07.052271 4948 generic.go:334] "Generic (PLEG): container finished" podID="82f9bffa-3363-4655-9e9f-25120440235d" containerID="999a9e6e81ccc112265cf6828f3d2e01f2dca86d41bf70f40279a5b91f22afdb" exitCode=0 Nov 22 05:10:07 crc kubenswrapper[4948]: I1122 05:10:07.052336 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" event={"ID":"82f9bffa-3363-4655-9e9f-25120440235d","Type":"ContainerDied","Data":"999a9e6e81ccc112265cf6828f3d2e01f2dca86d41bf70f40279a5b91f22afdb"} Nov 22 05:10:07 crc kubenswrapper[4948]: I1122 05:10:07.052376 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" event={"ID":"82f9bffa-3363-4655-9e9f-25120440235d","Type":"ContainerStarted","Data":"f9d7038766bced9435a6e0177a6c10cc7c90a95b4a32f72cae4d03ef4de293fc"} Nov 22 05:10:08 crc kubenswrapper[4948]: I1122 05:10:08.338299 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" Nov 22 05:10:08 crc kubenswrapper[4948]: I1122 05:10:08.484499 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4v57\" (UniqueName: \"kubernetes.io/projected/82f9bffa-3363-4655-9e9f-25120440235d-kube-api-access-l4v57\") pod \"82f9bffa-3363-4655-9e9f-25120440235d\" (UID: \"82f9bffa-3363-4655-9e9f-25120440235d\") " Nov 22 05:10:08 crc kubenswrapper[4948]: I1122 05:10:08.489273 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82f9bffa-3363-4655-9e9f-25120440235d-kube-api-access-l4v57" (OuterVolumeSpecName: "kube-api-access-l4v57") pod "82f9bffa-3363-4655-9e9f-25120440235d" (UID: "82f9bffa-3363-4655-9e9f-25120440235d"). InnerVolumeSpecName "kube-api-access-l4v57". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:10:08 crc kubenswrapper[4948]: I1122 05:10:08.586430 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4v57\" (UniqueName: \"kubernetes.io/projected/82f9bffa-3363-4655-9e9f-25120440235d-kube-api-access-l4v57\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:08 crc kubenswrapper[4948]: I1122 05:10:08.991022 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:10:08 crc kubenswrapper[4948]: I1122 05:10:08.991072 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:10:09 crc kubenswrapper[4948]: I1122 05:10:09.029597 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:10:09 crc kubenswrapper[4948]: I1122 05:10:09.068541 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" Nov 22 05:10:09 crc kubenswrapper[4948]: I1122 05:10:09.068596 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-1218-account-create-wdsdf" event={"ID":"82f9bffa-3363-4655-9e9f-25120440235d","Type":"ContainerDied","Data":"f9d7038766bced9435a6e0177a6c10cc7c90a95b4a32f72cae4d03ef4de293fc"} Nov 22 05:10:09 crc kubenswrapper[4948]: I1122 05:10:09.068655 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9d7038766bced9435a6e0177a6c10cc7c90a95b4a32f72cae4d03ef4de293fc" Nov 22 05:10:09 crc kubenswrapper[4948]: I1122 05:10:09.107382 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.208211 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-db-sync-q6cc2"] Nov 22 05:10:11 crc kubenswrapper[4948]: E1122 05:10:11.208929 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82f9bffa-3363-4655-9e9f-25120440235d" containerName="mariadb-account-create" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.208950 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="82f9bffa-3363-4655-9e9f-25120440235d" containerName="mariadb-account-create" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.209161 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="82f9bffa-3363-4655-9e9f-25120440235d" containerName="mariadb-account-create" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.209997 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.211812 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-manila-dockercfg-gpfjc" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.213540 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"combined-ca-bundle" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.214993 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-config-data" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.221451 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-sync-q6cc2"] Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.326447 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-job-config-data\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.326694 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb6mn\" (UniqueName: \"kubernetes.io/projected/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-kube-api-access-mb6mn\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.326748 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-combined-ca-bundle\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.326793 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-config-data\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.428602 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mb6mn\" (UniqueName: \"kubernetes.io/projected/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-kube-api-access-mb6mn\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.428697 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-combined-ca-bundle\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.428746 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-config-data\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.428841 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-job-config-data\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.434297 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-job-config-data\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.435835 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-combined-ca-bundle\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.436059 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-config-data\") pod \"manila-db-sync-q6cc2\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.468871 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb6mn\" (UniqueName: \"kubernetes.io/projected/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-kube-api-access-mb6mn\") pod \"manila-db-sync-q6cc2\" (UID: 
\"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.540141 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:11 crc kubenswrapper[4948]: I1122 05:10:11.814808 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-sync-q6cc2"] Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.096446 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-q6cc2" event={"ID":"3956af07-f2c2-4ccb-8556-d1cb4fe922cc","Type":"ContainerStarted","Data":"5a3c14f7c4f12e1d52be017cd077608a33dba1fbc55ba544c1bfad8c4c1db9c8"} Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.468389 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b4n5m"] Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.469401 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b4n5m" podUID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerName="registry-server" containerID="cri-o://c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4" gracePeriod=2 Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.876561 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.953327 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6j7kr\" (UniqueName: \"kubernetes.io/projected/7aa12c71-a181-46a9-84d8-51dddf3c0c73-kube-api-access-6j7kr\") pod \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.953507 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-utilities\") pod \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.953631 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-catalog-content\") pod \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\" (UID: \"7aa12c71-a181-46a9-84d8-51dddf3c0c73\") " Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.954667 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-utilities" (OuterVolumeSpecName: "utilities") pod "7aa12c71-a181-46a9-84d8-51dddf3c0c73" (UID: "7aa12c71-a181-46a9-84d8-51dddf3c0c73"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.969299 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7aa12c71-a181-46a9-84d8-51dddf3c0c73-kube-api-access-6j7kr" (OuterVolumeSpecName: "kube-api-access-6j7kr") pod "7aa12c71-a181-46a9-84d8-51dddf3c0c73" (UID: "7aa12c71-a181-46a9-84d8-51dddf3c0c73"). InnerVolumeSpecName "kube-api-access-6j7kr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:10:12 crc kubenswrapper[4948]: I1122 05:10:12.979903 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7aa12c71-a181-46a9-84d8-51dddf3c0c73" (UID: "7aa12c71-a181-46a9-84d8-51dddf3c0c73"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.055972 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.056031 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa12c71-a181-46a9-84d8-51dddf3c0c73-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.056057 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6j7kr\" (UniqueName: \"kubernetes.io/projected/7aa12c71-a181-46a9-84d8-51dddf3c0c73-kube-api-access-6j7kr\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.106531 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-q6cc2" event={"ID":"3956af07-f2c2-4ccb-8556-d1cb4fe922cc","Type":"ContainerStarted","Data":"aa8c815a806327d034f2d01fb667422ee133bb07e49ebf3856c76944215fec1a"} Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.110225 4948 generic.go:334] "Generic (PLEG): container finished" podID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerID="c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4" exitCode=0 Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.110263 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b4n5m" event={"ID":"7aa12c71-a181-46a9-84d8-51dddf3c0c73","Type":"ContainerDied","Data":"c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4"} Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.110318 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b4n5m" event={"ID":"7aa12c71-a181-46a9-84d8-51dddf3c0c73","Type":"ContainerDied","Data":"bbc0cc7d0ba4bde3444223608781b54430a04209260ff70702beb350c3b17328"} Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.110349 4948 scope.go:117] "RemoveContainer" containerID="c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.110355 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b4n5m" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.130938 4948 scope.go:117] "RemoveContainer" containerID="c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.139210 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-db-sync-q6cc2" podStartSLOduration=2.13917542 podStartE2EDuration="2.13917542s" podCreationTimestamp="2025-11-22 05:10:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:10:13.130878264 +0000 UTC m=+1415.816888810" watchObservedRunningTime="2025-11-22 05:10:13.13917542 +0000 UTC m=+1415.825185986" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.157724 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b4n5m"] Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.159916 4948 scope.go:117] "RemoveContainer" containerID="adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.168118 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b4n5m"] Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.186931 4948 scope.go:117] "RemoveContainer" containerID="c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4" Nov 22 05:10:13 crc kubenswrapper[4948]: E1122 05:10:13.187407 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4\": container with ID starting with c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4 not found: ID does not exist" containerID="c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.187446 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4"} err="failed to get container status \"c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4\": rpc error: code = NotFound desc = could not find container \"c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4\": container with ID starting with c31629fe7238e526307ebfbf56ed2650cfba3722545734f2e8267b83eb311cd4 not found: ID does not exist" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.187517 4948 scope.go:117] "RemoveContainer" containerID="c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014" Nov 22 05:10:13 crc kubenswrapper[4948]: E1122 05:10:13.187950 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014\": container with ID starting with c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014 not found: ID does not exist" containerID="c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.188007 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014"} err="failed to get container status 
\"c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014\": rpc error: code = NotFound desc = could not find container \"c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014\": container with ID starting with c2d8dfe886ebf1607237d8610bbd3a8227de4fbf44f9f1dd49bf6ee26376c014 not found: ID does not exist" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.188048 4948 scope.go:117] "RemoveContainer" containerID="adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de" Nov 22 05:10:13 crc kubenswrapper[4948]: E1122 05:10:13.188415 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de\": container with ID starting with adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de not found: ID does not exist" containerID="adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.188457 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de"} err="failed to get container status \"adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de\": rpc error: code = NotFound desc = could not find container \"adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de\": container with ID starting with adea8a0eaeaeb3b1ec71309b0047ad9380b220a2fab94ef953ba5a2119f970de not found: ID does not exist" Nov 22 05:10:13 crc kubenswrapper[4948]: I1122 05:10:13.767161 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" path="/var/lib/kubelet/pods/7aa12c71-a181-46a9-84d8-51dddf3c0c73/volumes" Nov 22 05:10:14 crc kubenswrapper[4948]: I1122 05:10:14.123222 4948 generic.go:334] "Generic (PLEG): container finished" podID="3956af07-f2c2-4ccb-8556-d1cb4fe922cc" containerID="aa8c815a806327d034f2d01fb667422ee133bb07e49ebf3856c76944215fec1a" exitCode=0 Nov 22 05:10:14 crc kubenswrapper[4948]: I1122 05:10:14.123314 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-q6cc2" event={"ID":"3956af07-f2c2-4ccb-8556-d1cb4fe922cc","Type":"ContainerDied","Data":"aa8c815a806327d034f2d01fb667422ee133bb07e49ebf3856c76944215fec1a"} Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.433053 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.494216 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-job-config-data\") pod \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.494300 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-combined-ca-bundle\") pod \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.494352 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mb6mn\" (UniqueName: \"kubernetes.io/projected/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-kube-api-access-mb6mn\") pod \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.494378 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-config-data\") pod \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\" (UID: \"3956af07-f2c2-4ccb-8556-d1cb4fe922cc\") " Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.499181 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-kube-api-access-mb6mn" (OuterVolumeSpecName: "kube-api-access-mb6mn") pod "3956af07-f2c2-4ccb-8556-d1cb4fe922cc" (UID: "3956af07-f2c2-4ccb-8556-d1cb4fe922cc"). InnerVolumeSpecName "kube-api-access-mb6mn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.499801 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "3956af07-f2c2-4ccb-8556-d1cb4fe922cc" (UID: "3956af07-f2c2-4ccb-8556-d1cb4fe922cc"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.502048 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-config-data" (OuterVolumeSpecName: "config-data") pod "3956af07-f2c2-4ccb-8556-d1cb4fe922cc" (UID: "3956af07-f2c2-4ccb-8556-d1cb4fe922cc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.518686 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3956af07-f2c2-4ccb-8556-d1cb4fe922cc" (UID: "3956af07-f2c2-4ccb-8556-d1cb4fe922cc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.595850 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.595896 4948 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.595908 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:15 crc kubenswrapper[4948]: I1122 05:10:15.595919 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mb6mn\" (UniqueName: \"kubernetes.io/projected/3956af07-f2c2-4ccb-8556-d1cb4fe922cc-kube-api-access-mb6mn\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.145759 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-q6cc2" event={"ID":"3956af07-f2c2-4ccb-8556-d1cb4fe922cc","Type":"ContainerDied","Data":"5a3c14f7c4f12e1d52be017cd077608a33dba1fbc55ba544c1bfad8c4c1db9c8"} Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.145835 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a3c14f7c4f12e1d52be017cd077608a33dba1fbc55ba544c1bfad8c4c1db9c8" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.145836 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-q6cc2" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.506184 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:10:16 crc kubenswrapper[4948]: E1122 05:10:16.506504 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerName="extract-content" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.506523 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerName="extract-content" Nov 22 05:10:16 crc kubenswrapper[4948]: E1122 05:10:16.506551 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3956af07-f2c2-4ccb-8556-d1cb4fe922cc" containerName="manila-db-sync" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.506561 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="3956af07-f2c2-4ccb-8556-d1cb4fe922cc" containerName="manila-db-sync" Nov 22 05:10:16 crc kubenswrapper[4948]: E1122 05:10:16.506578 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerName="extract-utilities" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.506588 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerName="extract-utilities" Nov 22 05:10:16 crc kubenswrapper[4948]: E1122 05:10:16.506607 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerName="registry-server" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.506618 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerName="registry-server" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.506785 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="3956af07-f2c2-4ccb-8556-d1cb4fe922cc" containerName="manila-db-sync" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.506805 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7aa12c71-a181-46a9-84d8-51dddf3c0c73" containerName="registry-server" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.507868 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.513835 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-config-data" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.514001 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-manila-dockercfg-gpfjc" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.514155 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-scripts" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.514324 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"combined-ca-bundle" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.514396 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-share-share0-config-data" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.514330 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"ceph-conf-files" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.545542 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.562965 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.564119 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.565966 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-scheduler-config-data" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.572446 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610011 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610065 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9559\" (UniqueName: \"kubernetes.io/projected/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-kube-api-access-r9559\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610089 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-ceph\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610119 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " 
pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610136 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610149 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610165 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-scripts\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610185 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-var-lib-manila\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610207 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hq996\" (UniqueName: \"kubernetes.io/projected/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-kube-api-access-hq996\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610223 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data-custom\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610242 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-combined-ca-bundle\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610262 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-scripts\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610283 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-etc-machine-id\") pod \"manila-share-share0-0\" (UID: 
\"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.610313 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.626409 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.627680 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.630174 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"cert-manila-public-svc" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.630256 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"cert-manila-internal-svc" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.630305 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-api-config-data" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.645715 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712107 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5t4d\" (UniqueName: \"kubernetes.io/projected/158cb42b-1482-47bb-ba07-6357ec2a8562-kube-api-access-t5t4d\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712494 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712526 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-scripts\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712553 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/158cb42b-1482-47bb-ba07-6357ec2a8562-etc-machine-id\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712585 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712618 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-public-tls-certs\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712618 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712647 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9559\" (UniqueName: \"kubernetes.io/projected/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-kube-api-access-r9559\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712717 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/158cb42b-1482-47bb-ba07-6357ec2a8562-logs\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712752 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-ceph\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712782 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712814 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data-custom\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712880 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712916 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712933 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data\") 
pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712962 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.712989 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-internal-tls-certs\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.713027 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-scripts\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.713052 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-var-lib-manila\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.713091 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hq996\" (UniqueName: \"kubernetes.io/projected/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-kube-api-access-hq996\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.713114 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data-custom\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.713146 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-combined-ca-bundle\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.713170 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-scripts\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.713197 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-etc-machine-id\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " 
pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.713272 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-etc-machine-id\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.715569 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-var-lib-manila\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.716731 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data-custom\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.719445 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.719659 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.720375 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.720679 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-scripts\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.721813 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-combined-ca-bundle\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.722162 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-ceph\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.725164 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-scripts\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.727821 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.735540 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9559\" (UniqueName: \"kubernetes.io/projected/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-kube-api-access-r9559\") pod \"manila-share-share0-0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.753094 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hq996\" (UniqueName: \"kubernetes.io/projected/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-kube-api-access-hq996\") pod \"manila-scheduler-0\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.814877 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5t4d\" (UniqueName: \"kubernetes.io/projected/158cb42b-1482-47bb-ba07-6357ec2a8562-kube-api-access-t5t4d\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.814932 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-scripts\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.814961 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/158cb42b-1482-47bb-ba07-6357ec2a8562-etc-machine-id\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.815009 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-public-tls-certs\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.815057 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/158cb42b-1482-47bb-ba07-6357ec2a8562-logs\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.815083 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 
05:10:16.815107 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data-custom\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.815154 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-internal-tls-certs\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.815175 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.816498 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/158cb42b-1482-47bb-ba07-6357ec2a8562-etc-machine-id\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.817059 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/158cb42b-1482-47bb-ba07-6357ec2a8562-logs\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.818227 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.819139 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-scripts\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.819811 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-internal-tls-certs\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.822038 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.822249 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data-custom\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 
05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.822265 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.834403 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5t4d\" (UniqueName: \"kubernetes.io/projected/158cb42b-1482-47bb-ba07-6357ec2a8562-kube-api-access-t5t4d\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.838860 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-public-tls-certs\") pod \"manila-api-0\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.889399 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:16 crc kubenswrapper[4948]: I1122 05:10:16.940384 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:17 crc kubenswrapper[4948]: I1122 05:10:17.080866 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:10:17 crc kubenswrapper[4948]: I1122 05:10:17.156136 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0","Type":"ContainerStarted","Data":"0ee804459d97c53b2b235e76e666404f5acf4bf96821e700e6a7b09ca96ebbbb"} Nov 22 05:10:18 crc kubenswrapper[4948]: I1122 05:10:18.101764 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:10:18 crc kubenswrapper[4948]: W1122 05:10:18.106650 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ca2cec1_b6e4_4be9_8f30_b6b410ac86ee.slice/crio-2300fd7491692b1a416044954d1b1bacaef3194a37547c01b7a5c632e4404215 WatchSource:0}: Error finding container 2300fd7491692b1a416044954d1b1bacaef3194a37547c01b7a5c632e4404215: Status 404 returned error can't find the container with id 2300fd7491692b1a416044954d1b1bacaef3194a37547c01b7a5c632e4404215 Nov 22 05:10:18 crc kubenswrapper[4948]: I1122 05:10:18.128350 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:10:18 crc kubenswrapper[4948]: I1122 05:10:18.187493 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0","Type":"ContainerStarted","Data":"df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996"} Nov 22 05:10:18 crc kubenswrapper[4948]: I1122 05:10:18.187552 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0","Type":"ContainerStarted","Data":"9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c"} Nov 22 05:10:18 crc kubenswrapper[4948]: I1122 05:10:18.188778 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" 
event={"ID":"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee","Type":"ContainerStarted","Data":"2300fd7491692b1a416044954d1b1bacaef3194a37547c01b7a5c632e4404215"} Nov 22 05:10:18 crc kubenswrapper[4948]: I1122 05:10:18.190878 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"158cb42b-1482-47bb-ba07-6357ec2a8562","Type":"ContainerStarted","Data":"8d17ca7348da4c7c84d2bb2c0a517f9c55d69d70a2887a94cfc71cdf45987e43"} Nov 22 05:10:18 crc kubenswrapper[4948]: I1122 05:10:18.208116 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-share-share0-0" podStartSLOduration=2.2080948400000002 podStartE2EDuration="2.20809484s" podCreationTimestamp="2025-11-22 05:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:10:18.204717724 +0000 UTC m=+1420.890728260" watchObservedRunningTime="2025-11-22 05:10:18.20809484 +0000 UTC m=+1420.894105356" Nov 22 05:10:19 crc kubenswrapper[4948]: I1122 05:10:19.199269 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee","Type":"ContainerStarted","Data":"1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f"} Nov 22 05:10:19 crc kubenswrapper[4948]: I1122 05:10:19.199877 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee","Type":"ContainerStarted","Data":"59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73"} Nov 22 05:10:19 crc kubenswrapper[4948]: I1122 05:10:19.203121 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"158cb42b-1482-47bb-ba07-6357ec2a8562","Type":"ContainerStarted","Data":"aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c"} Nov 22 05:10:19 crc kubenswrapper[4948]: I1122 05:10:19.203175 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"158cb42b-1482-47bb-ba07-6357ec2a8562","Type":"ContainerStarted","Data":"9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb"} Nov 22 05:10:19 crc kubenswrapper[4948]: I1122 05:10:19.218999 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-scheduler-0" podStartSLOduration=3.21897746 podStartE2EDuration="3.21897746s" podCreationTimestamp="2025-11-22 05:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:10:19.218870077 +0000 UTC m=+1421.904880603" watchObservedRunningTime="2025-11-22 05:10:19.21897746 +0000 UTC m=+1421.904987976" Nov 22 05:10:19 crc kubenswrapper[4948]: I1122 05:10:19.248203 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-api-0" podStartSLOduration=3.24818421 podStartE2EDuration="3.24818421s" podCreationTimestamp="2025-11-22 05:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:10:19.242965892 +0000 UTC m=+1421.928976408" watchObservedRunningTime="2025-11-22 05:10:19.24818421 +0000 UTC m=+1421.934194726" Nov 22 05:10:20 crc kubenswrapper[4948]: I1122 05:10:20.209425 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:26 crc kubenswrapper[4948]: I1122 05:10:26.823089 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:26 crc kubenswrapper[4948]: I1122 05:10:26.890406 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:38 crc kubenswrapper[4948]: I1122 05:10:38.297826 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:38 crc kubenswrapper[4948]: I1122 05:10:38.427267 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:38 crc kubenswrapper[4948]: I1122 05:10:38.562818 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.423565 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-db-sync-q6cc2"] Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.432334 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-db-sync-q6cc2"] Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.463946 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila1218-account-delete-5kpgc"] Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.464951 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.473729 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila1218-account-delete-5kpgc"] Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.517061 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.531752 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.532764 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-share-share0-0" podUID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerName="manila-share" containerID="cri-o://9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c" gracePeriod=30 Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.533070 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-share-share0-0" podUID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerName="probe" containerID="cri-o://df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996" gracePeriod=30 Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.533263 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-0" podUID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerName="probe" containerID="cri-o://1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f" gracePeriod=30 Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.533252 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-0" podUID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerName="manila-scheduler" 
containerID="cri-o://59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73" gracePeriod=30 Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.559917 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.560292 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-0" podUID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerName="manila-api" containerID="cri-o://aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c" gracePeriod=30 Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.560258 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-0" podUID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerName="manila-api-log" containerID="cri-o://9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb" gracePeriod=30 Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.561410 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnjjh\" (UniqueName: \"kubernetes.io/projected/7cd09812-330e-4771-9d44-4b2bd8bbed2d-kube-api-access-nnjjh\") pod \"manila1218-account-delete-5kpgc\" (UID: \"7cd09812-330e-4771-9d44-4b2bd8bbed2d\") " pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.663289 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnjjh\" (UniqueName: \"kubernetes.io/projected/7cd09812-330e-4771-9d44-4b2bd8bbed2d-kube-api-access-nnjjh\") pod \"manila1218-account-delete-5kpgc\" (UID: \"7cd09812-330e-4771-9d44-4b2bd8bbed2d\") " pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.687746 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnjjh\" (UniqueName: \"kubernetes.io/projected/7cd09812-330e-4771-9d44-4b2bd8bbed2d-kube-api-access-nnjjh\") pod \"manila1218-account-delete-5kpgc\" (UID: \"7cd09812-330e-4771-9d44-4b2bd8bbed2d\") " pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.769559 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3956af07-f2c2-4ccb-8556-d1cb4fe922cc" path="/var/lib/kubelet/pods/3956af07-f2c2-4ccb-8556-d1cb4fe922cc/volumes" Nov 22 05:10:39 crc kubenswrapper[4948]: I1122 05:10:39.817932 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.199083 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.257998 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila1218-account-delete-5kpgc"] Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.271152 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-ceph\") pod \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.271203 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data\") pod \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.271259 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-etc-machine-id\") pod \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.271288 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-combined-ca-bundle\") pod \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.271317 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data-custom\") pod \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.271402 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9559\" (UniqueName: \"kubernetes.io/projected/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-kube-api-access-r9559\") pod \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.271489 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-var-lib-manila\") pod \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.271569 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-scripts\") pod \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\" (UID: \"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0\") " Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.277219 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-scripts" (OuterVolumeSpecName: "scripts") pod "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" (UID: "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.277358 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" (UID: "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.277425 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" (UID: "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.280023 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-kube-api-access-r9559" (OuterVolumeSpecName: "kube-api-access-r9559") pod "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" (UID: "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0"). InnerVolumeSpecName "kube-api-access-r9559". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.283607 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" (UID: "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.283641 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-ceph" (OuterVolumeSpecName: "ceph") pod "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" (UID: "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.319193 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" (UID: "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.342547 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data" (OuterVolumeSpecName: "config-data") pod "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" (UID: "9f5ed6a5-2057-4c31-86bb-f9cc036f77f0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.372908 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9559\" (UniqueName: \"kubernetes.io/projected/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-kube-api-access-r9559\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.372945 4948 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.372957 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.372967 4948 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-ceph\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.372978 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.372990 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.373000 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.373010 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.401201 4948 generic.go:334] "Generic (PLEG): container finished" podID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerID="df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996" exitCode=0 Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.401237 4948 generic.go:334] "Generic (PLEG): container finished" podID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerID="9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c" exitCode=1 Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.401281 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0","Type":"ContainerDied","Data":"df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996"} Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.401311 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0","Type":"ContainerDied","Data":"9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c"} Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.401322 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" 
event={"ID":"9f5ed6a5-2057-4c31-86bb-f9cc036f77f0","Type":"ContainerDied","Data":"0ee804459d97c53b2b235e76e666404f5acf4bf96821e700e6a7b09ca96ebbbb"} Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.401339 4948 scope.go:117] "RemoveContainer" containerID="df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.401491 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.404435 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" event={"ID":"7cd09812-330e-4771-9d44-4b2bd8bbed2d","Type":"ContainerStarted","Data":"ac29e76556551ce059de57891c2532e41ff4ef38738d8eecfe6c731802afb818"} Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.406954 4948 generic.go:334] "Generic (PLEG): container finished" podID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerID="1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f" exitCode=0 Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.407009 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee","Type":"ContainerDied","Data":"1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f"} Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.409678 4948 generic.go:334] "Generic (PLEG): container finished" podID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerID="9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb" exitCode=143 Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.409715 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"158cb42b-1482-47bb-ba07-6357ec2a8562","Type":"ContainerDied","Data":"9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb"} Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.424610 4948 scope.go:117] "RemoveContainer" containerID="9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.438531 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" podStartSLOduration=1.438455855 podStartE2EDuration="1.438455855s" podCreationTimestamp="2025-11-22 05:10:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:10:40.433769172 +0000 UTC m=+1443.119779688" watchObservedRunningTime="2025-11-22 05:10:40.438455855 +0000 UTC m=+1443.124466371" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.447244 4948 scope.go:117] "RemoveContainer" containerID="df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996" Nov 22 05:10:40 crc kubenswrapper[4948]: E1122 05:10:40.449988 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996\": container with ID starting with df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996 not found: ID does not exist" containerID="df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.450034 4948 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996"} err="failed to get container status \"df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996\": rpc error: code = NotFound desc = could not find container \"df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996\": container with ID starting with df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996 not found: ID does not exist" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.450069 4948 scope.go:117] "RemoveContainer" containerID="9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c" Nov 22 05:10:40 crc kubenswrapper[4948]: E1122 05:10:40.450331 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c\": container with ID starting with 9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c not found: ID does not exist" containerID="9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.450352 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c"} err="failed to get container status \"9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c\": rpc error: code = NotFound desc = could not find container \"9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c\": container with ID starting with 9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c not found: ID does not exist" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.450364 4948 scope.go:117] "RemoveContainer" containerID="df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.451114 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996"} err="failed to get container status \"df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996\": rpc error: code = NotFound desc = could not find container \"df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996\": container with ID starting with df685be9666666abacf9d0093fdbbdd082dac49b633ece71a12c04a158916996 not found: ID does not exist" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.451189 4948 scope.go:117] "RemoveContainer" containerID="9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.451818 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c"} err="failed to get container status \"9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c\": rpc error: code = NotFound desc = could not find container \"9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c\": container with ID starting with 9480e361447d5e8e52c5494169181464f96e47d0033d48f62235c8c38c14fe7c not found: ID does not exist" Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.456993 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:10:40 crc kubenswrapper[4948]: I1122 05:10:40.457042 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:10:41 crc kubenswrapper[4948]: I1122 05:10:41.426860 4948 generic.go:334] "Generic (PLEG): container finished" podID="7cd09812-330e-4771-9d44-4b2bd8bbed2d" containerID="6d41d5334b88ca9a815e1dad187419919264864f39498ebcfaf9b8b2d3e807dc" exitCode=0 Nov 22 05:10:41 crc kubenswrapper[4948]: I1122 05:10:41.426923 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" event={"ID":"7cd09812-330e-4771-9d44-4b2bd8bbed2d","Type":"ContainerDied","Data":"6d41d5334b88ca9a815e1dad187419919264864f39498ebcfaf9b8b2d3e807dc"} Nov 22 05:10:41 crc kubenswrapper[4948]: I1122 05:10:41.770407 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" path="/var/lib/kubelet/pods/9f5ed6a5-2057-4c31-86bb-f9cc036f77f0/volumes" Nov 22 05:10:42 crc kubenswrapper[4948]: I1122 05:10:42.834966 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.007900 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnjjh\" (UniqueName: \"kubernetes.io/projected/7cd09812-330e-4771-9d44-4b2bd8bbed2d-kube-api-access-nnjjh\") pod \"7cd09812-330e-4771-9d44-4b2bd8bbed2d\" (UID: \"7cd09812-330e-4771-9d44-4b2bd8bbed2d\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.017365 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cd09812-330e-4771-9d44-4b2bd8bbed2d-kube-api-access-nnjjh" (OuterVolumeSpecName: "kube-api-access-nnjjh") pod "7cd09812-330e-4771-9d44-4b2bd8bbed2d" (UID: "7cd09812-330e-4771-9d44-4b2bd8bbed2d"). InnerVolumeSpecName "kube-api-access-nnjjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.095617 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.109251 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnjjh\" (UniqueName: \"kubernetes.io/projected/7cd09812-330e-4771-9d44-4b2bd8bbed2d-kube-api-access-nnjjh\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.210365 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data-custom\") pod \"158cb42b-1482-47bb-ba07-6357ec2a8562\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.210662 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/158cb42b-1482-47bb-ba07-6357ec2a8562-etc-machine-id\") pod \"158cb42b-1482-47bb-ba07-6357ec2a8562\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.210722 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data\") pod \"158cb42b-1482-47bb-ba07-6357ec2a8562\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.210750 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-combined-ca-bundle\") pod \"158cb42b-1482-47bb-ba07-6357ec2a8562\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.210820 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/158cb42b-1482-47bb-ba07-6357ec2a8562-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "158cb42b-1482-47bb-ba07-6357ec2a8562" (UID: "158cb42b-1482-47bb-ba07-6357ec2a8562"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.211104 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-scripts\") pod \"158cb42b-1482-47bb-ba07-6357ec2a8562\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.211149 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-public-tls-certs\") pod \"158cb42b-1482-47bb-ba07-6357ec2a8562\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.211176 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/158cb42b-1482-47bb-ba07-6357ec2a8562-logs\") pod \"158cb42b-1482-47bb-ba07-6357ec2a8562\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.211231 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-internal-tls-certs\") pod \"158cb42b-1482-47bb-ba07-6357ec2a8562\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.211310 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5t4d\" (UniqueName: \"kubernetes.io/projected/158cb42b-1482-47bb-ba07-6357ec2a8562-kube-api-access-t5t4d\") pod \"158cb42b-1482-47bb-ba07-6357ec2a8562\" (UID: \"158cb42b-1482-47bb-ba07-6357ec2a8562\") " Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.211657 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/158cb42b-1482-47bb-ba07-6357ec2a8562-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.211877 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/158cb42b-1482-47bb-ba07-6357ec2a8562-logs" (OuterVolumeSpecName: "logs") pod "158cb42b-1482-47bb-ba07-6357ec2a8562" (UID: "158cb42b-1482-47bb-ba07-6357ec2a8562"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.214750 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-scripts" (OuterVolumeSpecName: "scripts") pod "158cb42b-1482-47bb-ba07-6357ec2a8562" (UID: "158cb42b-1482-47bb-ba07-6357ec2a8562"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.214763 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "158cb42b-1482-47bb-ba07-6357ec2a8562" (UID: "158cb42b-1482-47bb-ba07-6357ec2a8562"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.214900 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/158cb42b-1482-47bb-ba07-6357ec2a8562-kube-api-access-t5t4d" (OuterVolumeSpecName: "kube-api-access-t5t4d") pod "158cb42b-1482-47bb-ba07-6357ec2a8562" (UID: "158cb42b-1482-47bb-ba07-6357ec2a8562"). InnerVolumeSpecName "kube-api-access-t5t4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.228905 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "158cb42b-1482-47bb-ba07-6357ec2a8562" (UID: "158cb42b-1482-47bb-ba07-6357ec2a8562"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.244567 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data" (OuterVolumeSpecName: "config-data") pod "158cb42b-1482-47bb-ba07-6357ec2a8562" (UID: "158cb42b-1482-47bb-ba07-6357ec2a8562"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.246032 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "158cb42b-1482-47bb-ba07-6357ec2a8562" (UID: "158cb42b-1482-47bb-ba07-6357ec2a8562"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.246443 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "158cb42b-1482-47bb-ba07-6357ec2a8562" (UID: "158cb42b-1482-47bb-ba07-6357ec2a8562"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.313772 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.313818 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.313837 4948 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.313858 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/158cb42b-1482-47bb-ba07-6357ec2a8562-logs\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.313875 4948 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.313892 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5t4d\" (UniqueName: \"kubernetes.io/projected/158cb42b-1482-47bb-ba07-6357ec2a8562-kube-api-access-t5t4d\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.313913 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.313930 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/158cb42b-1482-47bb-ba07-6357ec2a8562-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.444578 4948 generic.go:334] "Generic (PLEG): container finished" podID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerID="aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c" exitCode=0 Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.444641 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.444654 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"158cb42b-1482-47bb-ba07-6357ec2a8562","Type":"ContainerDied","Data":"aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c"} Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.444686 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"158cb42b-1482-47bb-ba07-6357ec2a8562","Type":"ContainerDied","Data":"8d17ca7348da4c7c84d2bb2c0a517f9c55d69d70a2887a94cfc71cdf45987e43"} Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.444728 4948 scope.go:117] "RemoveContainer" containerID="aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.448346 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" event={"ID":"7cd09812-330e-4771-9d44-4b2bd8bbed2d","Type":"ContainerDied","Data":"ac29e76556551ce059de57891c2532e41ff4ef38738d8eecfe6c731802afb818"} Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.448372 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac29e76556551ce059de57891c2532e41ff4ef38738d8eecfe6c731802afb818" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.448407 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila1218-account-delete-5kpgc" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.469481 4948 scope.go:117] "RemoveContainer" containerID="9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.484261 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.490743 4948 scope.go:117] "RemoveContainer" containerID="aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c" Nov 22 05:10:43 crc kubenswrapper[4948]: E1122 05:10:43.491268 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c\": container with ID starting with aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c not found: ID does not exist" containerID="aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.491317 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c"} err="failed to get container status \"aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c\": rpc error: code = NotFound desc = could not find container \"aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c\": container with ID starting with aee0322b2c8781b766b47906c2853b50416b1a8370190908b9cc4d16f9d3328c not found: ID does not exist" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.491347 4948 scope.go:117] "RemoveContainer" containerID="9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb" Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.491710 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:10:43 crc kubenswrapper[4948]: E1122 
Nov 22 05:10:43 crc kubenswrapper[4948]: E1122 05:10:43.491737 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb\": container with ID starting with 9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb not found: ID does not exist" containerID="9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb"
Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.491774 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb"} err="failed to get container status \"9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb\": rpc error: code = NotFound desc = could not find container \"9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb\": container with ID starting with 9737b0cc850d213591b3040c845c72df833206981a15a81193c64a66baf62ecb not found: ID does not exist"
Nov 22 05:10:43 crc kubenswrapper[4948]: I1122 05:10:43.772534 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="158cb42b-1482-47bb-ba07-6357ec2a8562" path="/var/lib/kubelet/pods/158cb42b-1482-47bb-ba07-6357ec2a8562/volumes"
Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.236346 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0"
Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.434943 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-combined-ca-bundle\") pod \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") "
Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.434995 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data-custom\") pod \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") "
Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.435053 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-scripts\") pod \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") "
Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.435118 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data\") pod \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") "
Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.435992 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-etc-machine-id\") pod \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\" (UID: \"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") "
\"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee\") " Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.436046 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" (UID: "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.436357 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.439034 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-kube-api-access-hq996" (OuterVolumeSpecName: "kube-api-access-hq996") pod "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" (UID: "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee"). InnerVolumeSpecName "kube-api-access-hq996". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.440293 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-scripts" (OuterVolumeSpecName: "scripts") pod "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" (UID: "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.440344 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" (UID: "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.479709 4948 generic.go:334] "Generic (PLEG): container finished" podID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerID="59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73" exitCode=0 Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.479940 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee","Type":"ContainerDied","Data":"59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73"} Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.480043 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee","Type":"ContainerDied","Data":"2300fd7491692b1a416044954d1b1bacaef3194a37547c01b7a5c632e4404215"} Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.480054 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.480177 4948 scope.go:117] "RemoveContainer" containerID="1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.507732 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-db-create-cz6pn"] Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.514764 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" (UID: "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.523335 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-db-create-cz6pn"] Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.528706 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-1218-account-create-wdsdf"] Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.536165 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila1218-account-delete-5kpgc"] Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.539226 4948 scope.go:117] "RemoveContainer" containerID="59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.542915 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-1218-account-create-wdsdf"] Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.544698 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hq996\" (UniqueName: \"kubernetes.io/projected/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-kube-api-access-hq996\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.544865 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.544978 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.545049 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.548509 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila1218-account-delete-5kpgc"] Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.570304 4948 scope.go:117] "RemoveContainer" containerID="1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f" Nov 22 05:10:44 crc kubenswrapper[4948]: E1122 05:10:44.571282 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f\": container with ID starting with 1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f not found: ID does not 
exist" containerID="1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.571331 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f"} err="failed to get container status \"1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f\": rpc error: code = NotFound desc = could not find container \"1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f\": container with ID starting with 1efddfec975987eb6db714b3a3a6c82c05848cd85140fe23fa98a780913dcc4f not found: ID does not exist" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.571361 4948 scope.go:117] "RemoveContainer" containerID="59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73" Nov 22 05:10:44 crc kubenswrapper[4948]: E1122 05:10:44.571952 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73\": container with ID starting with 59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73 not found: ID does not exist" containerID="59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.572059 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73"} err="failed to get container status \"59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73\": rpc error: code = NotFound desc = could not find container \"59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73\": container with ID starting with 59fbb49ecd95d607273d366f02a377f66888524bc1f4c74e2eeb79da0b261f73 not found: ID does not exist" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.573781 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data" (OuterVolumeSpecName: "config-data") pod "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" (UID: "1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.645646 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.669432 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-db-create-fcl2h"] Nov 22 05:10:44 crc kubenswrapper[4948]: E1122 05:10:44.669852 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerName="manila-api" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.669886 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerName="manila-api" Nov 22 05:10:44 crc kubenswrapper[4948]: E1122 05:10:44.669911 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerName="probe" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.669924 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerName="probe" Nov 22 05:10:44 crc kubenswrapper[4948]: E1122 05:10:44.669943 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cd09812-330e-4771-9d44-4b2bd8bbed2d" containerName="mariadb-account-delete" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.669955 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cd09812-330e-4771-9d44-4b2bd8bbed2d" containerName="mariadb-account-delete" Nov 22 05:10:44 crc kubenswrapper[4948]: E1122 05:10:44.669975 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerName="manila-api-log" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.669986 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerName="manila-api-log" Nov 22 05:10:44 crc kubenswrapper[4948]: E1122 05:10:44.670003 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerName="probe" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670013 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerName="probe" Nov 22 05:10:44 crc kubenswrapper[4948]: E1122 05:10:44.670040 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerName="manila-scheduler" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670051 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerName="manila-scheduler" Nov 22 05:10:44 crc kubenswrapper[4948]: E1122 05:10:44.670069 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerName="manila-share" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670082 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerName="manila-share" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670269 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerName="manila-api-log" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670291 4948 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="158cb42b-1482-47bb-ba07-6357ec2a8562" containerName="manila-api" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670309 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerName="manila-scheduler" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670326 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerName="probe" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670342 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f5ed6a5-2057-4c31-86bb-f9cc036f77f0" containerName="manila-share" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670357 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cd09812-330e-4771-9d44-4b2bd8bbed2d" containerName="mariadb-account-delete" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.670376 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" containerName="probe" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.671045 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-create-fcl2h" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.683642 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-create-fcl2h"] Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.747111 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw6tq\" (UniqueName: \"kubernetes.io/projected/795a2378-eed3-4d54-ae4f-0e73ae06efe9-kube-api-access-xw6tq\") pod \"manila-db-create-fcl2h\" (UID: \"795a2378-eed3-4d54-ae4f-0e73ae06efe9\") " pod="manila-kuttl-tests/manila-db-create-fcl2h" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.813886 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.821043 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.850951 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw6tq\" (UniqueName: \"kubernetes.io/projected/795a2378-eed3-4d54-ae4f-0e73ae06efe9-kube-api-access-xw6tq\") pod \"manila-db-create-fcl2h\" (UID: \"795a2378-eed3-4d54-ae4f-0e73ae06efe9\") " pod="manila-kuttl-tests/manila-db-create-fcl2h" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.868040 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw6tq\" (UniqueName: \"kubernetes.io/projected/795a2378-eed3-4d54-ae4f-0e73ae06efe9-kube-api-access-xw6tq\") pod \"manila-db-create-fcl2h\" (UID: \"795a2378-eed3-4d54-ae4f-0e73ae06efe9\") " pod="manila-kuttl-tests/manila-db-create-fcl2h" Nov 22 05:10:44 crc kubenswrapper[4948]: I1122 05:10:44.991442 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-db-create-fcl2h" Nov 22 05:10:45 crc kubenswrapper[4948]: I1122 05:10:45.285780 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-create-fcl2h"] Nov 22 05:10:45 crc kubenswrapper[4948]: I1122 05:10:45.496262 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-fcl2h" event={"ID":"795a2378-eed3-4d54-ae4f-0e73ae06efe9","Type":"ContainerStarted","Data":"5267cbb1c6eb8d28da22de7c536c289278e71bdbefcf319c7579941cdc799508"} Nov 22 05:10:45 crc kubenswrapper[4948]: I1122 05:10:45.496623 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-fcl2h" event={"ID":"795a2378-eed3-4d54-ae4f-0e73ae06efe9","Type":"ContainerStarted","Data":"5189a7433a263d42b7d4033cac4d493a07fac7ee98e5089078d89c3a359148fb"} Nov 22 05:10:45 crc kubenswrapper[4948]: I1122 05:10:45.772393 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee" path="/var/lib/kubelet/pods/1ca2cec1-b6e4-4be9-8f30-b6b410ac86ee/volumes" Nov 22 05:10:45 crc kubenswrapper[4948]: I1122 05:10:45.773957 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cd09812-330e-4771-9d44-4b2bd8bbed2d" path="/var/lib/kubelet/pods/7cd09812-330e-4771-9d44-4b2bd8bbed2d/volumes" Nov 22 05:10:45 crc kubenswrapper[4948]: I1122 05:10:45.774909 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82f9bffa-3363-4655-9e9f-25120440235d" path="/var/lib/kubelet/pods/82f9bffa-3363-4655-9e9f-25120440235d/volumes" Nov 22 05:10:45 crc kubenswrapper[4948]: I1122 05:10:45.775790 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1" path="/var/lib/kubelet/pods/99a0ab8b-0ffd-4d9c-8ddf-0abda75626e1/volumes" Nov 22 05:10:46 crc kubenswrapper[4948]: I1122 05:10:46.510504 4948 generic.go:334] "Generic (PLEG): container finished" podID="795a2378-eed3-4d54-ae4f-0e73ae06efe9" containerID="5267cbb1c6eb8d28da22de7c536c289278e71bdbefcf319c7579941cdc799508" exitCode=0 Nov 22 05:10:46 crc kubenswrapper[4948]: I1122 05:10:46.510568 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-fcl2h" event={"ID":"795a2378-eed3-4d54-ae4f-0e73ae06efe9","Type":"ContainerDied","Data":"5267cbb1c6eb8d28da22de7c536c289278e71bdbefcf319c7579941cdc799508"} Nov 22 05:10:46 crc kubenswrapper[4948]: I1122 05:10:46.931140 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-create-fcl2h" Nov 22 05:10:46 crc kubenswrapper[4948]: I1122 05:10:46.985112 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xw6tq\" (UniqueName: \"kubernetes.io/projected/795a2378-eed3-4d54-ae4f-0e73ae06efe9-kube-api-access-xw6tq\") pod \"795a2378-eed3-4d54-ae4f-0e73ae06efe9\" (UID: \"795a2378-eed3-4d54-ae4f-0e73ae06efe9\") " Nov 22 05:10:46 crc kubenswrapper[4948]: I1122 05:10:46.999801 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/795a2378-eed3-4d54-ae4f-0e73ae06efe9-kube-api-access-xw6tq" (OuterVolumeSpecName: "kube-api-access-xw6tq") pod "795a2378-eed3-4d54-ae4f-0e73ae06efe9" (UID: "795a2378-eed3-4d54-ae4f-0e73ae06efe9"). InnerVolumeSpecName "kube-api-access-xw6tq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:10:47 crc kubenswrapper[4948]: I1122 05:10:47.087480 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xw6tq\" (UniqueName: \"kubernetes.io/projected/795a2378-eed3-4d54-ae4f-0e73ae06efe9-kube-api-access-xw6tq\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:47 crc kubenswrapper[4948]: I1122 05:10:47.522201 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-create-fcl2h" event={"ID":"795a2378-eed3-4d54-ae4f-0e73ae06efe9","Type":"ContainerDied","Data":"5189a7433a263d42b7d4033cac4d493a07fac7ee98e5089078d89c3a359148fb"} Nov 22 05:10:47 crc kubenswrapper[4948]: I1122 05:10:47.522261 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5189a7433a263d42b7d4033cac4d493a07fac7ee98e5089078d89c3a359148fb" Nov 22 05:10:47 crc kubenswrapper[4948]: I1122 05:10:47.522295 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-create-fcl2h" Nov 22 05:10:54 crc kubenswrapper[4948]: I1122 05:10:54.750200 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-20fe-account-create-rvj7f"] Nov 22 05:10:54 crc kubenswrapper[4948]: E1122 05:10:54.751018 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="795a2378-eed3-4d54-ae4f-0e73ae06efe9" containerName="mariadb-database-create" Nov 22 05:10:54 crc kubenswrapper[4948]: I1122 05:10:54.751033 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="795a2378-eed3-4d54-ae4f-0e73ae06efe9" containerName="mariadb-database-create" Nov 22 05:10:54 crc kubenswrapper[4948]: I1122 05:10:54.751173 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="795a2378-eed3-4d54-ae4f-0e73ae06efe9" containerName="mariadb-database-create" Nov 22 05:10:54 crc kubenswrapper[4948]: I1122 05:10:54.751655 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" Nov 22 05:10:54 crc kubenswrapper[4948]: I1122 05:10:54.755116 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-db-secret" Nov 22 05:10:54 crc kubenswrapper[4948]: I1122 05:10:54.759151 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-20fe-account-create-rvj7f"] Nov 22 05:10:54 crc kubenswrapper[4948]: I1122 05:10:54.912803 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9c5s\" (UniqueName: \"kubernetes.io/projected/d6fa8999-034c-434a-8003-e03df12a0d32-kube-api-access-c9c5s\") pod \"manila-20fe-account-create-rvj7f\" (UID: \"d6fa8999-034c-434a-8003-e03df12a0d32\") " pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" Nov 22 05:10:55 crc kubenswrapper[4948]: I1122 05:10:55.014701 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9c5s\" (UniqueName: \"kubernetes.io/projected/d6fa8999-034c-434a-8003-e03df12a0d32-kube-api-access-c9c5s\") pod \"manila-20fe-account-create-rvj7f\" (UID: \"d6fa8999-034c-434a-8003-e03df12a0d32\") " pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" Nov 22 05:10:55 crc kubenswrapper[4948]: I1122 05:10:55.042154 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9c5s\" (UniqueName: \"kubernetes.io/projected/d6fa8999-034c-434a-8003-e03df12a0d32-kube-api-access-c9c5s\") pod \"manila-20fe-account-create-rvj7f\" (UID: \"d6fa8999-034c-434a-8003-e03df12a0d32\") " pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" Nov 22 05:10:55 crc kubenswrapper[4948]: I1122 05:10:55.076644 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" Nov 22 05:10:55 crc kubenswrapper[4948]: I1122 05:10:55.332197 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-20fe-account-create-rvj7f"] Nov 22 05:10:55 crc kubenswrapper[4948]: I1122 05:10:55.601519 4948 generic.go:334] "Generic (PLEG): container finished" podID="d6fa8999-034c-434a-8003-e03df12a0d32" containerID="d450927dd523b8b64dd98d927699272bf7908b53cc5a7c02be62532282f10bc7" exitCode=0 Nov 22 05:10:55 crc kubenswrapper[4948]: I1122 05:10:55.601648 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" event={"ID":"d6fa8999-034c-434a-8003-e03df12a0d32","Type":"ContainerDied","Data":"d450927dd523b8b64dd98d927699272bf7908b53cc5a7c02be62532282f10bc7"} Nov 22 05:10:55 crc kubenswrapper[4948]: I1122 05:10:55.602030 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" event={"ID":"d6fa8999-034c-434a-8003-e03df12a0d32","Type":"ContainerStarted","Data":"28b09099096c8f61d755866c8c19fdd4078bf6ca7fd2729386b95f877f563f3e"} Nov 22 05:10:56 crc kubenswrapper[4948]: I1122 05:10:56.961040 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" Nov 22 05:10:57 crc kubenswrapper[4948]: I1122 05:10:57.154015 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9c5s\" (UniqueName: \"kubernetes.io/projected/d6fa8999-034c-434a-8003-e03df12a0d32-kube-api-access-c9c5s\") pod \"d6fa8999-034c-434a-8003-e03df12a0d32\" (UID: \"d6fa8999-034c-434a-8003-e03df12a0d32\") " Nov 22 05:10:57 crc kubenswrapper[4948]: I1122 05:10:57.162029 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6fa8999-034c-434a-8003-e03df12a0d32-kube-api-access-c9c5s" (OuterVolumeSpecName: "kube-api-access-c9c5s") pod "d6fa8999-034c-434a-8003-e03df12a0d32" (UID: "d6fa8999-034c-434a-8003-e03df12a0d32"). InnerVolumeSpecName "kube-api-access-c9c5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:10:57 crc kubenswrapper[4948]: I1122 05:10:57.256516 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9c5s\" (UniqueName: \"kubernetes.io/projected/d6fa8999-034c-434a-8003-e03df12a0d32-kube-api-access-c9c5s\") on node \"crc\" DevicePath \"\"" Nov 22 05:10:57 crc kubenswrapper[4948]: I1122 05:10:57.622423 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" event={"ID":"d6fa8999-034c-434a-8003-e03df12a0d32","Type":"ContainerDied","Data":"28b09099096c8f61d755866c8c19fdd4078bf6ca7fd2729386b95f877f563f3e"} Nov 22 05:10:57 crc kubenswrapper[4948]: I1122 05:10:57.622478 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28b09099096c8f61d755866c8c19fdd4078bf6ca7fd2729386b95f877f563f3e" Nov 22 05:10:57 crc kubenswrapper[4948]: I1122 05:10:57.622535 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-20fe-account-create-rvj7f" Nov 22 05:10:59 crc kubenswrapper[4948]: I1122 05:10:59.790255 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:10:59 crc kubenswrapper[4948]: I1122 05:10:59.791150 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:10:59 crc kubenswrapper[4948]: I1122 05:10:59.990352 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-db-sync-vhphg"] Nov 22 05:10:59 crc kubenswrapper[4948]: E1122 05:10:59.990846 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6fa8999-034c-434a-8003-e03df12a0d32" containerName="mariadb-account-create" Nov 22 05:10:59 crc kubenswrapper[4948]: I1122 05:10:59.990925 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6fa8999-034c-434a-8003-e03df12a0d32" containerName="mariadb-account-create" Nov 22 05:10:59 crc kubenswrapper[4948]: I1122 05:10:59.991156 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6fa8999-034c-434a-8003-e03df12a0d32" containerName="mariadb-account-create" Nov 22 05:10:59 crc kubenswrapper[4948]: I1122 05:10:59.991805 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-vhphg" Nov 22 05:10:59 crc kubenswrapper[4948]: I1122 05:10:59.994048 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-config-data" Nov 22 05:10:59 crc kubenswrapper[4948]: I1122 05:10:59.994146 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-manila-dockercfg-cr5jd" Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.019593 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-sync-vhphg"] Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.096957 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nwlm\" (UniqueName: \"kubernetes.io/projected/d17870ed-d0e0-4ab7-86bb-0e14eb828406-kube-api-access-4nwlm\") pod \"manila-db-sync-vhphg\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " pod="manila-kuttl-tests/manila-db-sync-vhphg" Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.097023 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-config-data\") pod \"manila-db-sync-vhphg\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " pod="manila-kuttl-tests/manila-db-sync-vhphg" Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.097060 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-job-config-data\") pod \"manila-db-sync-vhphg\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " pod="manila-kuttl-tests/manila-db-sync-vhphg" Nov 22 05:11:00 crc 
Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.198844 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nwlm\" (UniqueName: \"kubernetes.io/projected/d17870ed-d0e0-4ab7-86bb-0e14eb828406-kube-api-access-4nwlm\") pod \"manila-db-sync-vhphg\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " pod="manila-kuttl-tests/manila-db-sync-vhphg"
Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.198944 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-config-data\") pod \"manila-db-sync-vhphg\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " pod="manila-kuttl-tests/manila-db-sync-vhphg"
Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.198999 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-job-config-data\") pod \"manila-db-sync-vhphg\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " pod="manila-kuttl-tests/manila-db-sync-vhphg"
Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.203710 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-job-config-data\") pod \"manila-db-sync-vhphg\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " pod="manila-kuttl-tests/manila-db-sync-vhphg"
Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.217058 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-config-data\") pod \"manila-db-sync-vhphg\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " pod="manila-kuttl-tests/manila-db-sync-vhphg"
Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.221040 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nwlm\" (UniqueName: \"kubernetes.io/projected/d17870ed-d0e0-4ab7-86bb-0e14eb828406-kube-api-access-4nwlm\") pod \"manila-db-sync-vhphg\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " pod="manila-kuttl-tests/manila-db-sync-vhphg"
Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-vhphg" Nov 22 05:11:00 crc kubenswrapper[4948]: I1122 05:11:00.785177 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-db-sync-vhphg"] Nov 22 05:11:01 crc kubenswrapper[4948]: I1122 05:11:01.658995 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-vhphg" event={"ID":"d17870ed-d0e0-4ab7-86bb-0e14eb828406","Type":"ContainerStarted","Data":"f1a5830e641af270376aefef6ca0ccb665324e3ea42647742fa12c651f8a762a"} Nov 22 05:11:01 crc kubenswrapper[4948]: I1122 05:11:01.659455 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-vhphg" event={"ID":"d17870ed-d0e0-4ab7-86bb-0e14eb828406","Type":"ContainerStarted","Data":"e1bda633bfb1d7645fa440aecf7b13f13b5f97d835f97f71e02d9671acea6291"} Nov 22 05:11:01 crc kubenswrapper[4948]: I1122 05:11:01.677621 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-db-sync-vhphg" podStartSLOduration=2.677601027 podStartE2EDuration="2.677601027s" podCreationTimestamp="2025-11-22 05:10:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:11:01.672551893 +0000 UTC m=+1464.358562409" watchObservedRunningTime="2025-11-22 05:11:01.677601027 +0000 UTC m=+1464.363611543" Nov 22 05:11:02 crc kubenswrapper[4948]: I1122 05:11:02.672696 4948 generic.go:334] "Generic (PLEG): container finished" podID="d17870ed-d0e0-4ab7-86bb-0e14eb828406" containerID="f1a5830e641af270376aefef6ca0ccb665324e3ea42647742fa12c651f8a762a" exitCode=0 Nov 22 05:11:02 crc kubenswrapper[4948]: I1122 05:11:02.672925 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-vhphg" event={"ID":"d17870ed-d0e0-4ab7-86bb-0e14eb828406","Type":"ContainerDied","Data":"f1a5830e641af270376aefef6ca0ccb665324e3ea42647742fa12c651f8a762a"} Nov 22 05:11:03 crc kubenswrapper[4948]: I1122 05:11:03.976614 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-vhphg" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.170969 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-job-config-data\") pod \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.171157 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nwlm\" (UniqueName: \"kubernetes.io/projected/d17870ed-d0e0-4ab7-86bb-0e14eb828406-kube-api-access-4nwlm\") pod \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.171245 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-config-data\") pod \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\" (UID: \"d17870ed-d0e0-4ab7-86bb-0e14eb828406\") " Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.177406 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d17870ed-d0e0-4ab7-86bb-0e14eb828406-kube-api-access-4nwlm" (OuterVolumeSpecName: "kube-api-access-4nwlm") pod "d17870ed-d0e0-4ab7-86bb-0e14eb828406" (UID: "d17870ed-d0e0-4ab7-86bb-0e14eb828406"). InnerVolumeSpecName "kube-api-access-4nwlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.180657 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "d17870ed-d0e0-4ab7-86bb-0e14eb828406" (UID: "d17870ed-d0e0-4ab7-86bb-0e14eb828406"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.182404 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-config-data" (OuterVolumeSpecName: "config-data") pod "d17870ed-d0e0-4ab7-86bb-0e14eb828406" (UID: "d17870ed-d0e0-4ab7-86bb-0e14eb828406"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.272620 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nwlm\" (UniqueName: \"kubernetes.io/projected/d17870ed-d0e0-4ab7-86bb-0e14eb828406-kube-api-access-4nwlm\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.272661 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.272673 4948 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/d17870ed-d0e0-4ab7-86bb-0e14eb828406-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.688286 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-db-sync-vhphg" event={"ID":"d17870ed-d0e0-4ab7-86bb-0e14eb828406","Type":"ContainerDied","Data":"e1bda633bfb1d7645fa440aecf7b13f13b5f97d835f97f71e02d9671acea6291"} Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.688658 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1bda633bfb1d7645fa440aecf7b13f13b5f97d835f97f71e02d9671acea6291" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.688554 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-db-sync-vhphg" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.967749 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:11:04 crc kubenswrapper[4948]: E1122 05:11:04.967995 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d17870ed-d0e0-4ab7-86bb-0e14eb828406" containerName="manila-db-sync" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.968008 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d17870ed-d0e0-4ab7-86bb-0e14eb828406" containerName="manila-db-sync" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.968132 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d17870ed-d0e0-4ab7-86bb-0e14eb828406" containerName="manila-db-sync" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.968708 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.970306 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-scheduler-config-data" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.976804 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-scripts" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.977045 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-manila-dockercfg-cr5jd" Nov 22 05:11:04 crc kubenswrapper[4948]: I1122 05:11:04.982043 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-config-data" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.009288 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.083029 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.084349 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.086883 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-share-share0-config-data" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.089050 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jckpc\" (UniqueName: \"kubernetes.io/projected/83e06745-7dbe-41e5-b492-6a176c6e579f-kube-api-access-jckpc\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.089089 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e06745-7dbe-41e5-b492-6a176c6e579f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.089111 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.089129 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.089147 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-scripts\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.089501 4948 reflector.go:368] Caches populated 
for *v1.Secret from object-"manila-kuttl-tests"/"ceph-conf-files" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.106007 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.131185 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.132313 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.134102 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-api-config-data" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.141853 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.190968 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-etc-machine-id\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191006 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-var-lib-manila\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191029 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nh47d\" (UniqueName: \"kubernetes.io/projected/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-kube-api-access-nh47d\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191059 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data-custom\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191146 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jckpc\" (UniqueName: \"kubernetes.io/projected/83e06745-7dbe-41e5-b492-6a176c6e579f-kube-api-access-jckpc\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191168 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191187 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-scripts\") 
pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191215 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e06745-7dbe-41e5-b492-6a176c6e579f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191233 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191253 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bdrp\" (UniqueName: \"kubernetes.io/projected/002a2e38-d4b9-4c13-bde1-5b01261fe21d-kube-api-access-7bdrp\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191270 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/002a2e38-d4b9-4c13-bde1-5b01261fe21d-logs\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191307 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191324 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e06745-7dbe-41e5-b492-6a176c6e579f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191426 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-ceph\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191483 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191506 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-scripts\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 
05:11:05.191551 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-scripts\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191586 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data-custom\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.191605 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/002a2e38-d4b9-4c13-bde1-5b01261fe21d-etc-machine-id\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.197417 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.201261 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.206280 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-scripts\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.206927 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jckpc\" (UniqueName: \"kubernetes.io/projected/83e06745-7dbe-41e5-b492-6a176c6e579f-kube-api-access-jckpc\") pod \"manila-scheduler-0\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.292509 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-ceph\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.292549 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.292581 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-scripts\") pod 
\"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293058 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data-custom\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293082 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/002a2e38-d4b9-4c13-bde1-5b01261fe21d-etc-machine-id\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293100 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-etc-machine-id\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293115 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-var-lib-manila\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293133 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nh47d\" (UniqueName: \"kubernetes.io/projected/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-kube-api-access-nh47d\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293154 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data-custom\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293166 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/002a2e38-d4b9-4c13-bde1-5b01261fe21d-etc-machine-id\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293177 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-etc-machine-id\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293187 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 
05:11:05.293333 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-var-lib-manila\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293352 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-scripts\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293481 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bdrp\" (UniqueName: \"kubernetes.io/projected/002a2e38-d4b9-4c13-bde1-5b01261fe21d-kube-api-access-7bdrp\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293508 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/002a2e38-d4b9-4c13-bde1-5b01261fe21d-logs\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.293975 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/002a2e38-d4b9-4c13-bde1-5b01261fe21d-logs\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.295340 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.296244 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-ceph\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.296954 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.297104 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data-custom\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.297379 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-scripts\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.307041 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data-custom\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.307740 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.308810 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-scripts\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.312717 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nh47d\" (UniqueName: \"kubernetes.io/projected/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-kube-api-access-nh47d\") pod \"manila-share-share0-0\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.315666 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bdrp\" (UniqueName: \"kubernetes.io/projected/002a2e38-d4b9-4c13-bde1-5b01261fe21d-kube-api-access-7bdrp\") pod \"manila-api-0\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.399394 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.447549 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.531680 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.705942 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"83e06745-7dbe-41e5-b492-6a176c6e579f","Type":"ContainerStarted","Data":"608fa0006a86333dd7c4c82d5461b75986c91926ccc8d1cea2422c437db60805"} Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.714514 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:11:05 crc kubenswrapper[4948]: W1122 05:11:05.729278 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod002a2e38_d4b9_4c13_bde1_5b01261fe21d.slice/crio-df725982e65c7b0c2d018dbafc0633366a9b1ac88f195ade250c7e9c1b03eb2a WatchSource:0}: Error finding container df725982e65c7b0c2d018dbafc0633366a9b1ac88f195ade250c7e9c1b03eb2a: Status 404 returned error can't find the container with id df725982e65c7b0c2d018dbafc0633366a9b1ac88f195ade250c7e9c1b03eb2a Nov 22 05:11:05 crc kubenswrapper[4948]: I1122 05:11:05.882208 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:11:05 crc kubenswrapper[4948]: W1122 05:11:05.893012 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7a0e7bb_bc72_4fac_89e5_f9f68b0f4537.slice/crio-83c83fd0ab7391ac954d8b9a8029bd433e90f54c5a72e1bb17915beb2a817e49 WatchSource:0}: Error finding container 83c83fd0ab7391ac954d8b9a8029bd433e90f54c5a72e1bb17915beb2a817e49: Status 404 returned error can't find the container with id 83c83fd0ab7391ac954d8b9a8029bd433e90f54c5a72e1bb17915beb2a817e49 Nov 22 05:11:06 crc kubenswrapper[4948]: I1122 05:11:06.724791 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"002a2e38-d4b9-4c13-bde1-5b01261fe21d","Type":"ContainerStarted","Data":"653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130"} Nov 22 05:11:06 crc kubenswrapper[4948]: I1122 05:11:06.725312 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"002a2e38-d4b9-4c13-bde1-5b01261fe21d","Type":"ContainerStarted","Data":"deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4"} Nov 22 05:11:06 crc kubenswrapper[4948]: I1122 05:11:06.725323 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"002a2e38-d4b9-4c13-bde1-5b01261fe21d","Type":"ContainerStarted","Data":"df725982e65c7b0c2d018dbafc0633366a9b1ac88f195ade250c7e9c1b03eb2a"} Nov 22 05:11:06 crc kubenswrapper[4948]: I1122 05:11:06.726277 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:06 crc kubenswrapper[4948]: I1122 05:11:06.732459 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537","Type":"ContainerStarted","Data":"3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105"} Nov 22 
05:11:06 crc kubenswrapper[4948]: I1122 05:11:06.732587 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537","Type":"ContainerStarted","Data":"83c83fd0ab7391ac954d8b9a8029bd433e90f54c5a72e1bb17915beb2a817e49"} Nov 22 05:11:06 crc kubenswrapper[4948]: I1122 05:11:06.734457 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"83e06745-7dbe-41e5-b492-6a176c6e579f","Type":"ContainerStarted","Data":"e980ff13a6f51ba38cd9acd76625d5e28e66d00a586b5e0a3b913ae75135e51c"} Nov 22 05:11:06 crc kubenswrapper[4948]: I1122 05:11:06.734492 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"83e06745-7dbe-41e5-b492-6a176c6e579f","Type":"ContainerStarted","Data":"92d0685a3cd38485807b86be49b0f2a64504b1722584321fa0a897e865fe1a3e"} Nov 22 05:11:06 crc kubenswrapper[4948]: I1122 05:11:06.774001 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-api-0" podStartSLOduration=1.7739767180000001 podStartE2EDuration="1.773976718s" podCreationTimestamp="2025-11-22 05:11:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:11:06.746001313 +0000 UTC m=+1469.432011819" watchObservedRunningTime="2025-11-22 05:11:06.773976718 +0000 UTC m=+1469.459987244" Nov 22 05:11:07 crc kubenswrapper[4948]: I1122 05:11:07.745543 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537","Type":"ContainerStarted","Data":"d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae"} Nov 22 05:11:07 crc kubenswrapper[4948]: I1122 05:11:07.770545 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-share-share0-0" podStartSLOduration=2.770517142 podStartE2EDuration="2.770517142s" podCreationTimestamp="2025-11-22 05:11:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:11:07.76831608 +0000 UTC m=+1470.454326596" watchObservedRunningTime="2025-11-22 05:11:07.770517142 +0000 UTC m=+1470.456527668" Nov 22 05:11:07 crc kubenswrapper[4948]: I1122 05:11:07.771184 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-scheduler-0" podStartSLOduration=3.771174751 podStartE2EDuration="3.771174751s" podCreationTimestamp="2025-11-22 05:11:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:11:06.775036948 +0000 UTC m=+1469.461047464" watchObservedRunningTime="2025-11-22 05:11:07.771174751 +0000 UTC m=+1470.457185287" Nov 22 05:11:15 crc kubenswrapper[4948]: I1122 05:11:15.296026 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:15 crc kubenswrapper[4948]: I1122 05:11:15.401005 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:16 crc kubenswrapper[4948]: I1122 05:11:16.762506 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:26 crc 
kubenswrapper[4948]: I1122 05:11:26.816835 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:26 crc kubenswrapper[4948]: I1122 05:11:26.817415 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.730531 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-share-share1-0"] Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.732195 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.734833 4948 reflector.go:368] Caches populated for *v1.Secret from object-"manila-kuttl-tests"/"manila-share-share1-config-data" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.743815 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-share-share1-0"] Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.843382 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.843686 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.843711 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-scripts\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.843743 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.843771 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffmkh\" (UniqueName: \"kubernetes.io/projected/7c59af44-b4c5-4c5f-ac58-a256515aa02c-kube-api-access-ffmkh\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.843988 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-ceph\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.844115 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.945033 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-ceph\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.945111 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.945157 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.945178 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.945202 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-scripts\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.945239 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.945263 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffmkh\" (UniqueName: \"kubernetes.io/projected/7c59af44-b4c5-4c5f-ac58-a256515aa02c-kube-api-access-ffmkh\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.945423 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.945744 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.951716 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.952585 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.952836 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-scripts\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.955014 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-ceph\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:27 crc kubenswrapper[4948]: I1122 05:11:27.977877 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffmkh\" (UniqueName: \"kubernetes.io/projected/7c59af44-b4c5-4c5f-ac58-a256515aa02c-kube-api-access-ffmkh\") pod \"manila-share-share1-0\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:28 crc kubenswrapper[4948]: I1122 05:11:28.097112 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:28 crc kubenswrapper[4948]: I1122 05:11:28.572921 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-share-share1-0"] Nov 22 05:11:28 crc kubenswrapper[4948]: I1122 05:11:28.914129 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share1-0" event={"ID":"7c59af44-b4c5-4c5f-ac58-a256515aa02c","Type":"ContainerStarted","Data":"3a2fc35ceade30e30bf2257ab67c13aea8cbf5ec58f8b8190743dc10aabcb2a9"} Nov 22 05:11:29 crc kubenswrapper[4948]: I1122 05:11:29.789240 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:11:29 crc kubenswrapper[4948]: I1122 05:11:29.789923 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:11:29 crc kubenswrapper[4948]: I1122 05:11:29.925730 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share1-0" event={"ID":"7c59af44-b4c5-4c5f-ac58-a256515aa02c","Type":"ContainerStarted","Data":"f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e"} Nov 22 05:11:29 crc kubenswrapper[4948]: I1122 05:11:29.927050 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share1-0" event={"ID":"7c59af44-b4c5-4c5f-ac58-a256515aa02c","Type":"ContainerStarted","Data":"73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31"} Nov 22 05:11:30 crc kubenswrapper[4948]: I1122 05:11:30.241978 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila-share-share1-0" podStartSLOduration=3.241945193 podStartE2EDuration="3.241945193s" podCreationTimestamp="2025-11-22 05:11:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:11:30.235893281 +0000 UTC m=+1492.921903807" watchObservedRunningTime="2025-11-22 05:11:30.241945193 +0000 UTC m=+1492.927955739" Nov 22 05:11:38 crc kubenswrapper[4948]: I1122 05:11:38.097751 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.428944 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fxxqj"] Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.431020 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.478780 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fxxqj"] Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.569460 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-utilities\") pod \"community-operators-fxxqj\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.569743 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kn7r\" (UniqueName: \"kubernetes.io/projected/3adc4805-aba7-4719-9f09-6b54ade74c12-kube-api-access-4kn7r\") pod \"community-operators-fxxqj\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.569806 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-catalog-content\") pod \"community-operators-fxxqj\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.671493 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kn7r\" (UniqueName: \"kubernetes.io/projected/3adc4805-aba7-4719-9f09-6b54ade74c12-kube-api-access-4kn7r\") pod \"community-operators-fxxqj\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.671542 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-catalog-content\") pod \"community-operators-fxxqj\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.671574 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-utilities\") pod \"community-operators-fxxqj\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.672034 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-utilities\") pod \"community-operators-fxxqj\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.672096 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-catalog-content\") pod \"community-operators-fxxqj\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.689542 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4kn7r\" (UniqueName: \"kubernetes.io/projected/3adc4805-aba7-4719-9f09-6b54ade74c12-kube-api-access-4kn7r\") pod \"community-operators-fxxqj\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:40 crc kubenswrapper[4948]: I1122 05:11:40.754442 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:41 crc kubenswrapper[4948]: I1122 05:11:41.250023 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fxxqj"] Nov 22 05:11:42 crc kubenswrapper[4948]: I1122 05:11:42.044812 4948 generic.go:334] "Generic (PLEG): container finished" podID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerID="7cd762c053010591d23851d4809ba25fa004c509c30a1f8fa562a3cf2f925b7c" exitCode=0 Nov 22 05:11:42 crc kubenswrapper[4948]: I1122 05:11:42.044879 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxxqj" event={"ID":"3adc4805-aba7-4719-9f09-6b54ade74c12","Type":"ContainerDied","Data":"7cd762c053010591d23851d4809ba25fa004c509c30a1f8fa562a3cf2f925b7c"} Nov 22 05:11:42 crc kubenswrapper[4948]: I1122 05:11:42.045184 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxxqj" event={"ID":"3adc4805-aba7-4719-9f09-6b54ade74c12","Type":"ContainerStarted","Data":"ec9ecde752857b3b67acb17d05af973f564818b233da6d3fce152df6af45e098"} Nov 22 05:11:43 crc kubenswrapper[4948]: I1122 05:11:43.061304 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxxqj" event={"ID":"3adc4805-aba7-4719-9f09-6b54ade74c12","Type":"ContainerStarted","Data":"29c7046492d7e6cc7ac82d82c8cbae3d755580b6aa515ddfee739069a28aafbc"} Nov 22 05:11:44 crc kubenswrapper[4948]: I1122 05:11:44.074185 4948 generic.go:334] "Generic (PLEG): container finished" podID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerID="29c7046492d7e6cc7ac82d82c8cbae3d755580b6aa515ddfee739069a28aafbc" exitCode=0 Nov 22 05:11:44 crc kubenswrapper[4948]: I1122 05:11:44.074249 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxxqj" event={"ID":"3adc4805-aba7-4719-9f09-6b54ade74c12","Type":"ContainerDied","Data":"29c7046492d7e6cc7ac82d82c8cbae3d755580b6aa515ddfee739069a28aafbc"} Nov 22 05:11:45 crc kubenswrapper[4948]: I1122 05:11:45.080647 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxxqj" event={"ID":"3adc4805-aba7-4719-9f09-6b54ade74c12","Type":"ContainerStarted","Data":"133255a0c91dd3bd641d672102675c567aebe64a8f2bbcf7d01a6e7ae06bdb15"} Nov 22 05:11:45 crc kubenswrapper[4948]: I1122 05:11:45.108172 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fxxqj" podStartSLOduration=2.70779396 podStartE2EDuration="5.108152984s" podCreationTimestamp="2025-11-22 05:11:40 +0000 UTC" firstStartedPulling="2025-11-22 05:11:42.048120911 +0000 UTC m=+1504.734131427" lastFinishedPulling="2025-11-22 05:11:44.448479935 +0000 UTC m=+1507.134490451" observedRunningTime="2025-11-22 05:11:45.10450538 +0000 UTC m=+1507.790515896" watchObservedRunningTime="2025-11-22 05:11:45.108152984 +0000 UTC m=+1507.794163500" Nov 22 05:11:49 crc kubenswrapper[4948]: I1122 05:11:49.481062 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:50 crc kubenswrapper[4948]: I1122 05:11:50.214286 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:11:50 crc kubenswrapper[4948]: I1122 05:11:50.215214 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-share-share0-0" podUID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" containerName="probe" containerID="cri-o://d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae" gracePeriod=30 Nov 22 05:11:50 crc kubenswrapper[4948]: I1122 05:11:50.215488 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-share-share0-0" podUID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" containerName="manila-share" containerID="cri-o://3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105" gracePeriod=30 Nov 22 05:11:50 crc kubenswrapper[4948]: I1122 05:11:50.765318 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:50 crc kubenswrapper[4948]: I1122 05:11:50.765546 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:50 crc kubenswrapper[4948]: I1122 05:11:50.814971 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:50 crc kubenswrapper[4948]: I1122 05:11:50.918872 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.032588 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-ceph\") pod \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.032671 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-var-lib-manila\") pod \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.032821 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" (UID: "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537"). InnerVolumeSpecName "var-lib-manila". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.032947 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data\") pod \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.033015 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nh47d\" (UniqueName: \"kubernetes.io/projected/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-kube-api-access-nh47d\") pod \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.033075 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-etc-machine-id\") pod \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.033208 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data-custom\") pod \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.033292 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-scripts\") pod \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\" (UID: \"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537\") " Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.033310 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" (UID: "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.033615 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.033637 4948 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.038896 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" (UID: "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.039072 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-kube-api-access-nh47d" (OuterVolumeSpecName: "kube-api-access-nh47d") pod "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" (UID: "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537"). InnerVolumeSpecName "kube-api-access-nh47d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.039578 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-scripts" (OuterVolumeSpecName: "scripts") pod "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" (UID: "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.043536 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-ceph" (OuterVolumeSpecName: "ceph") pod "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" (UID: "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.117655 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data" (OuterVolumeSpecName: "config-data") pod "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" (UID: "a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.135142 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.135205 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nh47d\" (UniqueName: \"kubernetes.io/projected/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-kube-api-access-nh47d\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.135236 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.135259 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.135282 4948 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537-ceph\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.139393 4948 generic.go:334] "Generic (PLEG): container finished" podID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" containerID="d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae" exitCode=0 Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.139442 4948 generic.go:334] "Generic (PLEG): container finished" podID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" 
containerID="3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105" exitCode=1 Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.139443 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537","Type":"ContainerDied","Data":"d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae"} Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.139455 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share0-0" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.139523 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537","Type":"ContainerDied","Data":"3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105"} Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.139551 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share0-0" event={"ID":"a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537","Type":"ContainerDied","Data":"83c83fd0ab7391ac954d8b9a8029bd433e90f54c5a72e1bb17915beb2a817e49"} Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.139578 4948 scope.go:117] "RemoveContainer" containerID="d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.167877 4948 scope.go:117] "RemoveContainer" containerID="3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.179190 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.185054 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-share-share0-0"] Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.185287 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.194190 4948 scope.go:117] "RemoveContainer" containerID="d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae" Nov 22 05:11:51 crc kubenswrapper[4948]: E1122 05:11:51.196311 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae\": container with ID starting with d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae not found: ID does not exist" containerID="d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.196351 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae"} err="failed to get container status \"d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae\": rpc error: code = NotFound desc = could not find container \"d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae\": container with ID starting with d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae not found: ID does not exist" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.196377 4948 scope.go:117] "RemoveContainer" containerID="3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105" Nov 22 05:11:51 crc 
kubenswrapper[4948]: E1122 05:11:51.196823 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105\": container with ID starting with 3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105 not found: ID does not exist" containerID="3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.196872 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105"} err="failed to get container status \"3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105\": rpc error: code = NotFound desc = could not find container \"3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105\": container with ID starting with 3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105 not found: ID does not exist" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.196910 4948 scope.go:117] "RemoveContainer" containerID="d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.197415 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae"} err="failed to get container status \"d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae\": rpc error: code = NotFound desc = could not find container \"d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae\": container with ID starting with d0804371c72e8cd48dc62a99bc21d9af7aa613a09ad51fdd93148e1d4de000ae not found: ID does not exist" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.197438 4948 scope.go:117] "RemoveContainer" containerID="3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.197787 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105"} err="failed to get container status \"3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105\": rpc error: code = NotFound desc = could not find container \"3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105\": container with ID starting with 3da28611a3aac26fe9053617dee5b6de0a6413f001344ca190f2cfb2a2962105 not found: ID does not exist" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.226866 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fxxqj"] Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.774657 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" path="/var/lib/kubelet/pods/a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537/volumes" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.825190 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r"] Nov 22 05:11:51 crc kubenswrapper[4948]: E1122 05:11:51.826278 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" containerName="probe" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.826344 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" 
containerName="probe" Nov 22 05:11:51 crc kubenswrapper[4948]: E1122 05:11:51.826394 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" containerName="manila-share" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.826416 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" containerName="manila-share" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.826737 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" containerName="manila-share" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.826764 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7a0e7bb-bc72-4fac-89e5-f9f68b0f4537" containerName="probe" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.827589 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.852151 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r"] Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.954172 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckvp6\" (UniqueName: \"kubernetes.io/projected/dae59312-7043-48fe-a4f1-39cd42683756-kube-api-access-ckvp6\") pod \"manila-service-cleanup-n5b5h655-4f92r\" (UID: \"dae59312-7043-48fe-a4f1-39cd42683756\") " pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.954254 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-config-data\") pod \"manila-service-cleanup-n5b5h655-4f92r\" (UID: \"dae59312-7043-48fe-a4f1-39cd42683756\") " pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:51 crc kubenswrapper[4948]: I1122 05:11:51.954547 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-job-config-data\") pod \"manila-service-cleanup-n5b5h655-4f92r\" (UID: \"dae59312-7043-48fe-a4f1-39cd42683756\") " pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.044374 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r"] Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.044958 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config-data job-config-data kube-api-access-ckvp6], unattached volumes=[], failed to process volumes=[]: context canceled" pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" podUID="dae59312-7043-48fe-a4f1-39cd42683756" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.056511 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckvp6\" (UniqueName: \"kubernetes.io/projected/dae59312-7043-48fe-a4f1-39cd42683756-kube-api-access-ckvp6\") pod \"manila-service-cleanup-n5b5h655-4f92r\" (UID: \"dae59312-7043-48fe-a4f1-39cd42683756\") " pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.056585 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-config-data\") pod \"manila-service-cleanup-n5b5h655-4f92r\" (UID: \"dae59312-7043-48fe-a4f1-39cd42683756\") " pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.056664 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-job-config-data\") pod \"manila-service-cleanup-n5b5h655-4f92r\" (UID: \"dae59312-7043-48fe-a4f1-39cd42683756\") " pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.056810 4948 secret.go:188] Couldn't get secret manila-kuttl-tests/manila-config-data: secret "manila-config-data" not found Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.056791 4948 secret.go:188] Couldn't get secret manila-kuttl-tests/manila-config-data: secret "manila-config-data" not found Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.056868 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-job-config-data podName:dae59312-7043-48fe-a4f1-39cd42683756 nodeName:}" failed. No retries permitted until 2025-11-22 05:11:52.556849433 +0000 UTC m=+1515.242859959 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "job-config-data" (UniqueName: "kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-job-config-data") pod "manila-service-cleanup-n5b5h655-4f92r" (UID: "dae59312-7043-48fe-a4f1-39cd42683756") : secret "manila-config-data" not found Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.056899 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-config-data podName:dae59312-7043-48fe-a4f1-39cd42683756 nodeName:}" failed. No retries permitted until 2025-11-22 05:11:52.556876384 +0000 UTC m=+1515.242887000 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-config-data") pod "manila-service-cleanup-n5b5h655-4f92r" (UID: "dae59312-7043-48fe-a4f1-39cd42683756") : secret "manila-config-data" not found Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.061350 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-db-sync-vhphg"] Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.062456 4948 projected.go:194] Error preparing data for projected volume kube-api-access-ckvp6 for pod manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r: failed to fetch token: serviceaccounts "manila-manila" not found Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.062549 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dae59312-7043-48fe-a4f1-39cd42683756-kube-api-access-ckvp6 podName:dae59312-7043-48fe-a4f1-39cd42683756 nodeName:}" failed. No retries permitted until 2025-11-22 05:11:52.562528875 +0000 UTC m=+1515.248539471 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-ckvp6" (UniqueName: "kubernetes.io/projected/dae59312-7043-48fe-a4f1-39cd42683756-kube-api-access-ckvp6") pod "manila-service-cleanup-n5b5h655-4f92r" (UID: "dae59312-7043-48fe-a4f1-39cd42683756") : failed to fetch token: serviceaccounts "manila-manila" not found Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.072072 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-db-sync-vhphg"] Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.078344 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.078652 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-0" podUID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerName="manila-scheduler" containerID="cri-o://92d0685a3cd38485807b86be49b0f2a64504b1722584321fa0a897e865fe1a3e" gracePeriod=30 Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.078766 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-scheduler-0" podUID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerName="probe" containerID="cri-o://e980ff13a6f51ba38cd9acd76625d5e28e66d00a586b5e0a3b913ae75135e51c" gracePeriod=30 Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.091124 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-share-share1-0"] Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.091416 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-share-share1-0" podUID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerName="manila-share" containerID="cri-o://73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31" gracePeriod=30 Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.091529 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-share-share1-0" podUID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerName="probe" containerID="cri-o://f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e" gracePeriod=30 Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.101376 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/manila20fe-account-delete-sctb8"] Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.102438 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.111659 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila20fe-account-delete-sctb8"] Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.129119 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.129374 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-0" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerName="manila-api-log" containerID="cri-o://deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4" gracePeriod=30 Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.129556 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/manila-api-0" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerName="manila-api" containerID="cri-o://653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130" gracePeriod=30 Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.158402 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.172102 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.259782 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lclz\" (UniqueName: \"kubernetes.io/projected/ddfc3408-4389-4f52-8658-9c239dd9155c-kube-api-access-7lclz\") pod \"manila20fe-account-delete-sctb8\" (UID: \"ddfc3408-4389-4f52-8658-9c239dd9155c\") " pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.360849 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lclz\" (UniqueName: \"kubernetes.io/projected/ddfc3408-4389-4f52-8658-9c239dd9155c-kube-api-access-7lclz\") pod \"manila20fe-account-delete-sctb8\" (UID: \"ddfc3408-4389-4f52-8658-9c239dd9155c\") " pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.382211 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lclz\" (UniqueName: \"kubernetes.io/projected/ddfc3408-4389-4f52-8658-9c239dd9155c-kube-api-access-7lclz\") pod \"manila20fe-account-delete-sctb8\" (UID: \"ddfc3408-4389-4f52-8658-9c239dd9155c\") " pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.422722 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.563985 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-config-data\") pod \"manila-service-cleanup-n5b5h655-4f92r\" (UID: \"dae59312-7043-48fe-a4f1-39cd42683756\") " pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.564389 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-job-config-data\") pod \"manila-service-cleanup-n5b5h655-4f92r\" (UID: \"dae59312-7043-48fe-a4f1-39cd42683756\") " pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.564437 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckvp6\" (UniqueName: \"kubernetes.io/projected/dae59312-7043-48fe-a4f1-39cd42683756-kube-api-access-ckvp6\") pod \"manila-service-cleanup-n5b5h655-4f92r\" (UID: \"dae59312-7043-48fe-a4f1-39cd42683756\") " pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.564107 4948 secret.go:188] Couldn't get secret manila-kuttl-tests/manila-config-data: secret "manila-config-data" not found Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.564850 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-config-data podName:dae59312-7043-48fe-a4f1-39cd42683756 nodeName:}" failed. No retries permitted until 2025-11-22 05:11:53.564832 +0000 UTC m=+1516.250842506 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-config-data") pod "manila-service-cleanup-n5b5h655-4f92r" (UID: "dae59312-7043-48fe-a4f1-39cd42683756") : secret "manila-config-data" not found Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.565118 4948 secret.go:188] Couldn't get secret manila-kuttl-tests/manila-config-data: secret "manila-config-data" not found Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.565143 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-job-config-data podName:dae59312-7043-48fe-a4f1-39cd42683756 nodeName:}" failed. No retries permitted until 2025-11-22 05:11:53.565134678 +0000 UTC m=+1516.251145194 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "job-config-data" (UniqueName: "kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-job-config-data") pod "manila-service-cleanup-n5b5h655-4f92r" (UID: "dae59312-7043-48fe-a4f1-39cd42683756") : secret "manila-config-data" not found Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.569534 4948 projected.go:194] Error preparing data for projected volume kube-api-access-ckvp6 for pod manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r: failed to fetch token: serviceaccounts "manila-manila" not found Nov 22 05:11:52 crc kubenswrapper[4948]: E1122 05:11:52.569588 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dae59312-7043-48fe-a4f1-39cd42683756-kube-api-access-ckvp6 podName:dae59312-7043-48fe-a4f1-39cd42683756 nodeName:}" failed. 
No retries permitted until 2025-11-22 05:11:53.569574625 +0000 UTC m=+1516.255585141 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-ckvp6" (UniqueName: "kubernetes.io/projected/dae59312-7043-48fe-a4f1-39cd42683756-kube-api-access-ckvp6") pod "manila-service-cleanup-n5b5h655-4f92r" (UID: "dae59312-7043-48fe-a4f1-39cd42683756") : failed to fetch token: serviceaccounts "manila-manila" not found Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.875610 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/manila20fe-account-delete-sctb8"] Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.880063 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs"] Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.880295 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerName="manager" containerID="cri-o://f1498ecab56492bae27357ec7b7d41d601e3f9c44a1eaade88b36ff137201781" gracePeriod=10 Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.880757 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerName="kube-rbac-proxy" containerID="cri-o://cf063af18bf18ab24249018e15a5eec38650eeaddd4610de0a547f77094e8f90" gracePeriod=10 Nov 22 05:11:52 crc kubenswrapper[4948]: I1122 05:11:52.998082 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.012074 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.86:8081/readyz\": dial tcp 10.217.0.86:8081: connect: connection refused" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.166294 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" event={"ID":"ddfc3408-4389-4f52-8658-9c239dd9155c","Type":"ContainerStarted","Data":"f10389ab65b323618622e74f15ffb8e0880f223907ba7bee15481805465c8dc6"} Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.166330 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" event={"ID":"ddfc3408-4389-4f52-8658-9c239dd9155c","Type":"ContainerStarted","Data":"841d44ca75460f2f66ef03f37e21f8a45982d679a006bd7cc0e474aa788b322a"} Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.172706 4948 generic.go:334] "Generic (PLEG): container finished" podID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerID="cf063af18bf18ab24249018e15a5eec38650eeaddd4610de0a547f77094e8f90" exitCode=0 Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.172742 4948 generic.go:334] "Generic (PLEG): container finished" podID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerID="f1498ecab56492bae27357ec7b7d41d601e3f9c44a1eaade88b36ff137201781" exitCode=0 Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.172808 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" 
event={"ID":"4704d599-bafd-404a-96b3-9cf06bf0658f","Type":"ContainerDied","Data":"cf063af18bf18ab24249018e15a5eec38650eeaddd4610de0a547f77094e8f90"} Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.172832 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" event={"ID":"4704d599-bafd-404a-96b3-9cf06bf0658f","Type":"ContainerDied","Data":"f1498ecab56492bae27357ec7b7d41d601e3f9c44a1eaade88b36ff137201781"} Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.183895 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-etc-machine-id\") pod \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.183980 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-var-lib-manila\") pod \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.184033 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "7c59af44-b4c5-4c5f-ac58-a256515aa02c" (UID: "7c59af44-b4c5-4c5f-ac58-a256515aa02c"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.184065 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "7c59af44-b4c5-4c5f-ac58-a256515aa02c" (UID: "7c59af44-b4c5-4c5f-ac58-a256515aa02c"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.184175 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data-custom\") pod \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.184221 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-scripts\") pod \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.184238 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-ceph\") pod \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.184294 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffmkh\" (UniqueName: \"kubernetes.io/projected/7c59af44-b4c5-4c5f-ac58-a256515aa02c-kube-api-access-ffmkh\") pod \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.184313 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data\") pod \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\" (UID: \"7c59af44-b4c5-4c5f-ac58-a256515aa02c\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.184594 4948 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.184606 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7c59af44-b4c5-4c5f-ac58-a256515aa02c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.188718 4948 generic.go:334] "Generic (PLEG): container finished" podID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerID="e980ff13a6f51ba38cd9acd76625d5e28e66d00a586b5e0a3b913ae75135e51c" exitCode=0 Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.188809 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"83e06745-7dbe-41e5-b492-6a176c6e579f","Type":"ContainerDied","Data":"e980ff13a6f51ba38cd9acd76625d5e28e66d00a586b5e0a3b913ae75135e51c"} Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.193660 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c59af44-b4c5-4c5f-ac58-a256515aa02c-kube-api-access-ffmkh" (OuterVolumeSpecName: "kube-api-access-ffmkh") pod "7c59af44-b4c5-4c5f-ac58-a256515aa02c" (UID: "7c59af44-b4c5-4c5f-ac58-a256515aa02c"). InnerVolumeSpecName "kube-api-access-ffmkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.195778 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7c59af44-b4c5-4c5f-ac58-a256515aa02c" (UID: "7c59af44-b4c5-4c5f-ac58-a256515aa02c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.196830 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-scripts" (OuterVolumeSpecName: "scripts") pod "7c59af44-b4c5-4c5f-ac58-a256515aa02c" (UID: "7c59af44-b4c5-4c5f-ac58-a256515aa02c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.197418 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-ceph" (OuterVolumeSpecName: "ceph") pod "7c59af44-b4c5-4c5f-ac58-a256515aa02c" (UID: "7c59af44-b4c5-4c5f-ac58-a256515aa02c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.215284 4948 generic.go:334] "Generic (PLEG): container finished" podID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerID="deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4" exitCode=143 Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.215393 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"002a2e38-d4b9-4c13-bde1-5b01261fe21d","Type":"ContainerDied","Data":"deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4"} Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.226870 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" podStartSLOduration=1.226847547 podStartE2EDuration="1.226847547s" podCreationTimestamp="2025-11-22 05:11:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:11:53.216481632 +0000 UTC m=+1515.902492148" watchObservedRunningTime="2025-11-22 05:11:53.226847547 +0000 UTC m=+1515.912858063" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.256892 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/manila-operator-index-fxd8c"] Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.260135 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/manila-operator-index-fxd8c" podUID="45443cdd-98a8-4fc6-bc0e-d621318951ad" containerName="registry-server" containerID="cri-o://c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354" gracePeriod=30 Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.272294 4948 generic.go:334] "Generic (PLEG): container finished" podID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerID="f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e" exitCode=0 Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.272537 4948 generic.go:334] "Generic (PLEG): container finished" podID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerID="73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31" exitCode=1 Nov 22 05:11:53 crc 
kubenswrapper[4948]: I1122 05:11:53.272731 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fxxqj" podUID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerName="registry-server" containerID="cri-o://133255a0c91dd3bd641d672102675c567aebe64a8f2bbcf7d01a6e7ae06bdb15" gracePeriod=2 Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.272882 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-share-share1-0" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.273198 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share1-0" event={"ID":"7c59af44-b4c5-4c5f-ac58-a256515aa02c","Type":"ContainerDied","Data":"f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e"} Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.273238 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share1-0" event={"ID":"7c59af44-b4c5-4c5f-ac58-a256515aa02c","Type":"ContainerDied","Data":"73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31"} Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.273248 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-share-share1-0" event={"ID":"7c59af44-b4c5-4c5f-ac58-a256515aa02c","Type":"ContainerDied","Data":"3a2fc35ceade30e30bf2257ab67c13aea8cbf5ec58f8b8190743dc10aabcb2a9"} Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.273262 4948 scope.go:117] "RemoveContainer" containerID="f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.273339 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.285695 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.285729 4948 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-ceph\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.285741 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffmkh\" (UniqueName: \"kubernetes.io/projected/7c59af44-b4c5-4c5f-ac58-a256515aa02c-kube-api-access-ffmkh\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.285771 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.341835 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2"] Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.358660 4948 scope.go:117] "RemoveContainer" containerID="73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.367534 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/8969e63359ad8fed8d254fb6a1fc8b224a5cb911dbfca2a136ad77f38ardjt2"] Nov 22 05:11:53 crc kubenswrapper[4948]: 
I1122 05:11:53.387234 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data" (OuterVolumeSpecName: "config-data") pod "7c59af44-b4c5-4c5f-ac58-a256515aa02c" (UID: "7c59af44-b4c5-4c5f-ac58-a256515aa02c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.388039 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c59af44-b4c5-4c5f-ac58-a256515aa02c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.395543 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r"] Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.396357 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-service-cleanup-n5b5h655-4f92r"] Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.409332 4948 scope.go:117] "RemoveContainer" containerID="f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e" Nov 22 05:11:53 crc kubenswrapper[4948]: E1122 05:11:53.409800 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e\": container with ID starting with f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e not found: ID does not exist" containerID="f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.409850 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e"} err="failed to get container status \"f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e\": rpc error: code = NotFound desc = could not find container \"f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e\": container with ID starting with f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e not found: ID does not exist" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.409882 4948 scope.go:117] "RemoveContainer" containerID="73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31" Nov 22 05:11:53 crc kubenswrapper[4948]: E1122 05:11:53.410128 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31\": container with ID starting with 73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31 not found: ID does not exist" containerID="73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.410153 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31"} err="failed to get container status \"73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31\": rpc error: code = NotFound desc = could not find container \"73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31\": container with ID starting with 73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31 not found: ID does not exist" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.410169 4948 
scope.go:117] "RemoveContainer" containerID="f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.410536 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e"} err="failed to get container status \"f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e\": rpc error: code = NotFound desc = could not find container \"f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e\": container with ID starting with f9fa0eb54df5eda8552daf885419ae36c31a639ab3d5405429d6e0fcf525431e not found: ID does not exist" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.410559 4948 scope.go:117] "RemoveContainer" containerID="73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.410743 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31"} err="failed to get container status \"73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31\": rpc error: code = NotFound desc = could not find container \"73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31\": container with ID starting with 73297df113630c9c45fb32fbd2e779efb40bf957eef13f660fa57d1b1003da31 not found: ID does not exist" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.431451 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.589634 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-webhook-cert\") pod \"4704d599-bafd-404a-96b3-9cf06bf0658f\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.589692 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnr4g\" (UniqueName: \"kubernetes.io/projected/4704d599-bafd-404a-96b3-9cf06bf0658f-kube-api-access-dnr4g\") pod \"4704d599-bafd-404a-96b3-9cf06bf0658f\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.590390 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-apiservice-cert\") pod \"4704d599-bafd-404a-96b3-9cf06bf0658f\" (UID: \"4704d599-bafd-404a-96b3-9cf06bf0658f\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.590780 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.590794 4948 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/dae59312-7043-48fe-a4f1-39cd42683756-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.590804 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckvp6\" (UniqueName: \"kubernetes.io/projected/dae59312-7043-48fe-a4f1-39cd42683756-kube-api-access-ckvp6\") on node 
\"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.593730 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "4704d599-bafd-404a-96b3-9cf06bf0658f" (UID: "4704d599-bafd-404a-96b3-9cf06bf0658f"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.604682 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4704d599-bafd-404a-96b3-9cf06bf0658f-kube-api-access-dnr4g" (OuterVolumeSpecName: "kube-api-access-dnr4g") pod "4704d599-bafd-404a-96b3-9cf06bf0658f" (UID: "4704d599-bafd-404a-96b3-9cf06bf0658f"). InnerVolumeSpecName "kube-api-access-dnr4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.611603 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-share-share1-0"] Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.613331 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "4704d599-bafd-404a-96b3-9cf06bf0658f" (UID: "4704d599-bafd-404a-96b3-9cf06bf0658f"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.616002 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-share-share1-0"] Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.692162 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnr4g\" (UniqueName: \"kubernetes.io/projected/4704d599-bafd-404a-96b3-9cf06bf0658f-kube-api-access-dnr4g\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.692192 4948 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.692201 4948 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4704d599-bafd-404a-96b3-9cf06bf0658f-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.768813 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f1c28d6-a954-42f2-9313-3c35189441bf" path="/var/lib/kubelet/pods/2f1c28d6-a954-42f2-9313-3c35189441bf/volumes" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.769659 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" path="/var/lib/kubelet/pods/7c59af44-b4c5-4c5f-ac58-a256515aa02c/volumes" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.770451 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d17870ed-d0e0-4ab7-86bb-0e14eb828406" path="/var/lib/kubelet/pods/d17870ed-d0e0-4ab7-86bb-0e14eb828406/volumes" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.771806 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dae59312-7043-48fe-a4f1-39cd42683756" path="/var/lib/kubelet/pods/dae59312-7043-48fe-a4f1-39cd42683756/volumes" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 
05:11:53.772415 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.894622 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffxks\" (UniqueName: \"kubernetes.io/projected/45443cdd-98a8-4fc6-bc0e-d621318951ad-kube-api-access-ffxks\") pod \"45443cdd-98a8-4fc6-bc0e-d621318951ad\" (UID: \"45443cdd-98a8-4fc6-bc0e-d621318951ad\") " Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.900652 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45443cdd-98a8-4fc6-bc0e-d621318951ad-kube-api-access-ffxks" (OuterVolumeSpecName: "kube-api-access-ffxks") pod "45443cdd-98a8-4fc6-bc0e-d621318951ad" (UID: "45443cdd-98a8-4fc6-bc0e-d621318951ad"). InnerVolumeSpecName "kube-api-access-ffxks". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:11:53 crc kubenswrapper[4948]: I1122 05:11:53.996220 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffxks\" (UniqueName: \"kubernetes.io/projected/45443cdd-98a8-4fc6-bc0e-d621318951ad-kube-api-access-ffxks\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.285124 4948 generic.go:334] "Generic (PLEG): container finished" podID="45443cdd-98a8-4fc6-bc0e-d621318951ad" containerID="c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354" exitCode=0 Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.285190 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-index-fxd8c" Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.285195 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-index-fxd8c" event={"ID":"45443cdd-98a8-4fc6-bc0e-d621318951ad","Type":"ContainerDied","Data":"c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354"} Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.285270 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-index-fxd8c" event={"ID":"45443cdd-98a8-4fc6-bc0e-d621318951ad","Type":"ContainerDied","Data":"ad296a989570c830bfe5e5f1012e4a496dfec6b7122398b43bde968e2fefca05"} Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.285314 4948 scope.go:117] "RemoveContainer" containerID="c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354" Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.289443 4948 generic.go:334] "Generic (PLEG): container finished" podID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerID="133255a0c91dd3bd641d672102675c567aebe64a8f2bbcf7d01a6e7ae06bdb15" exitCode=0 Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.289637 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxxqj" event={"ID":"3adc4805-aba7-4719-9f09-6b54ade74c12","Type":"ContainerDied","Data":"133255a0c91dd3bd641d672102675c567aebe64a8f2bbcf7d01a6e7ae06bdb15"} Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.293633 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.293643 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs" event={"ID":"4704d599-bafd-404a-96b3-9cf06bf0658f","Type":"ContainerDied","Data":"40af8d95214022807ab40bf868d0076fc09b928929d77f43a916240ee2fb3838"} Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.314182 4948 scope.go:117] "RemoveContainer" containerID="c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354" Nov 22 05:11:54 crc kubenswrapper[4948]: E1122 05:11:54.318665 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354\": container with ID starting with c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354 not found: ID does not exist" containerID="c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354" Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.318723 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354"} err="failed to get container status \"c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354\": rpc error: code = NotFound desc = could not find container \"c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354\": container with ID starting with c6b3ed6a06260338b4274448497d345fad904efde39b273518580c42fe43d354 not found: ID does not exist" Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.318762 4948 scope.go:117] "RemoveContainer" containerID="cf063af18bf18ab24249018e15a5eec38650eeaddd4610de0a547f77094e8f90" Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.374600 4948 scope.go:117] "RemoveContainer" containerID="f1498ecab56492bae27357ec7b7d41d601e3f9c44a1eaade88b36ff137201781" Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.383891 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs"] Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.393092 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7d5c54747-f2qjs"] Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.398706 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/manila-operator-index-fxd8c"] Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.404118 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/manila-operator-index-fxd8c"] Nov 22 05:11:54 crc kubenswrapper[4948]: I1122 05:11:54.919412 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.034132 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-catalog-content\") pod \"3adc4805-aba7-4719-9f09-6b54ade74c12\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.034224 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-utilities\") pod \"3adc4805-aba7-4719-9f09-6b54ade74c12\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.034285 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kn7r\" (UniqueName: \"kubernetes.io/projected/3adc4805-aba7-4719-9f09-6b54ade74c12-kube-api-access-4kn7r\") pod \"3adc4805-aba7-4719-9f09-6b54ade74c12\" (UID: \"3adc4805-aba7-4719-9f09-6b54ade74c12\") " Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.035001 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-utilities" (OuterVolumeSpecName: "utilities") pod "3adc4805-aba7-4719-9f09-6b54ade74c12" (UID: "3adc4805-aba7-4719-9f09-6b54ade74c12"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.045636 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3adc4805-aba7-4719-9f09-6b54ade74c12-kube-api-access-4kn7r" (OuterVolumeSpecName: "kube-api-access-4kn7r") pod "3adc4805-aba7-4719-9f09-6b54ade74c12" (UID: "3adc4805-aba7-4719-9f09-6b54ade74c12"). InnerVolumeSpecName "kube-api-access-4kn7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.098836 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3adc4805-aba7-4719-9f09-6b54ade74c12" (UID: "3adc4805-aba7-4719-9f09-6b54ade74c12"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.136763 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kn7r\" (UniqueName: \"kubernetes.io/projected/3adc4805-aba7-4719-9f09-6b54ade74c12-kube-api-access-4kn7r\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.137025 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.137122 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc4805-aba7-4719-9f09-6b54ade74c12-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.303079 4948 generic.go:334] "Generic (PLEG): container finished" podID="ddfc3408-4389-4f52-8658-9c239dd9155c" containerID="f10389ab65b323618622e74f15ffb8e0880f223907ba7bee15481805465c8dc6" exitCode=0 Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.303136 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" event={"ID":"ddfc3408-4389-4f52-8658-9c239dd9155c","Type":"ContainerDied","Data":"f10389ab65b323618622e74f15ffb8e0880f223907ba7bee15481805465c8dc6"} Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.305618 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fxxqj" event={"ID":"3adc4805-aba7-4719-9f09-6b54ade74c12","Type":"ContainerDied","Data":"ec9ecde752857b3b67acb17d05af973f564818b233da6d3fce152df6af45e098"} Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.305650 4948 scope.go:117] "RemoveContainer" containerID="133255a0c91dd3bd641d672102675c567aebe64a8f2bbcf7d01a6e7ae06bdb15" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.305660 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fxxqj" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.327849 4948 scope.go:117] "RemoveContainer" containerID="29c7046492d7e6cc7ac82d82c8cbae3d755580b6aa515ddfee739069a28aafbc" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.406409 4948 scope.go:117] "RemoveContainer" containerID="7cd762c053010591d23851d4809ba25fa004c509c30a1f8fa562a3cf2f925b7c" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.414326 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fxxqj"] Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.421458 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fxxqj"] Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.448931 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="manila-kuttl-tests/manila-api-0" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerName="manila-api" probeResult="failure" output="Get \"http://10.217.0.112:8786/healthcheck\": dial tcp 10.217.0.112:8786: connect: connection refused" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.765232 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3adc4805-aba7-4719-9f09-6b54ade74c12" path="/var/lib/kubelet/pods/3adc4805-aba7-4719-9f09-6b54ade74c12/volumes" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.766078 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45443cdd-98a8-4fc6-bc0e-d621318951ad" path="/var/lib/kubelet/pods/45443cdd-98a8-4fc6-bc0e-d621318951ad/volumes" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.766586 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" path="/var/lib/kubelet/pods/4704d599-bafd-404a-96b3-9cf06bf0658f/volumes" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.796378 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.845789 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-scripts\") pod \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.845916 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data-custom\") pod \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.845995 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bdrp\" (UniqueName: \"kubernetes.io/projected/002a2e38-d4b9-4c13-bde1-5b01261fe21d-kube-api-access-7bdrp\") pod \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.846020 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/002a2e38-d4b9-4c13-bde1-5b01261fe21d-etc-machine-id\") pod \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.846071 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data\") pod \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.846087 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/002a2e38-d4b9-4c13-bde1-5b01261fe21d-logs\") pod \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\" (UID: \"002a2e38-d4b9-4c13-bde1-5b01261fe21d\") " Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.846151 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/002a2e38-d4b9-4c13-bde1-5b01261fe21d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "002a2e38-d4b9-4c13-bde1-5b01261fe21d" (UID: "002a2e38-d4b9-4c13-bde1-5b01261fe21d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.846458 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/002a2e38-d4b9-4c13-bde1-5b01261fe21d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.847403 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/002a2e38-d4b9-4c13-bde1-5b01261fe21d-logs" (OuterVolumeSpecName: "logs") pod "002a2e38-d4b9-4c13-bde1-5b01261fe21d" (UID: "002a2e38-d4b9-4c13-bde1-5b01261fe21d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.853660 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/002a2e38-d4b9-4c13-bde1-5b01261fe21d-kube-api-access-7bdrp" (OuterVolumeSpecName: "kube-api-access-7bdrp") pod "002a2e38-d4b9-4c13-bde1-5b01261fe21d" (UID: "002a2e38-d4b9-4c13-bde1-5b01261fe21d"). InnerVolumeSpecName "kube-api-access-7bdrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.853676 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "002a2e38-d4b9-4c13-bde1-5b01261fe21d" (UID: "002a2e38-d4b9-4c13-bde1-5b01261fe21d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.853705 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-scripts" (OuterVolumeSpecName: "scripts") pod "002a2e38-d4b9-4c13-bde1-5b01261fe21d" (UID: "002a2e38-d4b9-4c13-bde1-5b01261fe21d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.884663 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data" (OuterVolumeSpecName: "config-data") pod "002a2e38-d4b9-4c13-bde1-5b01261fe21d" (UID: "002a2e38-d4b9-4c13-bde1-5b01261fe21d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.947855 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.947882 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.947894 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bdrp\" (UniqueName: \"kubernetes.io/projected/002a2e38-d4b9-4c13-bde1-5b01261fe21d-kube-api-access-7bdrp\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.947903 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/002a2e38-d4b9-4c13-bde1-5b01261fe21d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:55 crc kubenswrapper[4948]: I1122 05:11:55.947912 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/002a2e38-d4b9-4c13-bde1-5b01261fe21d-logs\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.319494 4948 generic.go:334] "Generic (PLEG): container finished" podID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerID="653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130" exitCode=0 Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.319558 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" 
event={"ID":"002a2e38-d4b9-4c13-bde1-5b01261fe21d","Type":"ContainerDied","Data":"653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130"} Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.319872 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-api-0" event={"ID":"002a2e38-d4b9-4c13-bde1-5b01261fe21d","Type":"ContainerDied","Data":"df725982e65c7b0c2d018dbafc0633366a9b1ac88f195ade250c7e9c1b03eb2a"} Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.319913 4948 scope.go:117] "RemoveContainer" containerID="653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.319614 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-api-0" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.364995 4948 scope.go:117] "RemoveContainer" containerID="deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.377226 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.386339 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-api-0"] Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.386733 4948 scope.go:117] "RemoveContainer" containerID="653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130" Nov 22 05:11:56 crc kubenswrapper[4948]: E1122 05:11:56.387233 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130\": container with ID starting with 653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130 not found: ID does not exist" containerID="653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.387298 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130"} err="failed to get container status \"653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130\": rpc error: code = NotFound desc = could not find container \"653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130\": container with ID starting with 653f982891da87d24ca29b5ae6862af7af8d2c48a5845abd033a7f991fbd8130 not found: ID does not exist" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.387338 4948 scope.go:117] "RemoveContainer" containerID="deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4" Nov 22 05:11:56 crc kubenswrapper[4948]: E1122 05:11:56.387881 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4\": container with ID starting with deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4 not found: ID does not exist" containerID="deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.387940 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4"} err="failed to get container status \"deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4\": rpc error: code = NotFound 
desc = could not find container \"deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4\": container with ID starting with deee8950e3273f2cf2b6d0ac5d34268a86924c983c992f1d86b1ec5f98791aa4 not found: ID does not exist" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.642267 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.759213 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lclz\" (UniqueName: \"kubernetes.io/projected/ddfc3408-4389-4f52-8658-9c239dd9155c-kube-api-access-7lclz\") pod \"ddfc3408-4389-4f52-8658-9c239dd9155c\" (UID: \"ddfc3408-4389-4f52-8658-9c239dd9155c\") " Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.781840 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddfc3408-4389-4f52-8658-9c239dd9155c-kube-api-access-7lclz" (OuterVolumeSpecName: "kube-api-access-7lclz") pod "ddfc3408-4389-4f52-8658-9c239dd9155c" (UID: "ddfc3408-4389-4f52-8658-9c239dd9155c"). InnerVolumeSpecName "kube-api-access-7lclz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:11:56 crc kubenswrapper[4948]: I1122 05:11:56.861629 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lclz\" (UniqueName: \"kubernetes.io/projected/ddfc3408-4389-4f52-8658-9c239dd9155c-kube-api-access-7lclz\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.140845 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-db-create-fcl2h"] Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.155478 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-20fe-account-create-rvj7f"] Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.162079 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila20fe-account-delete-sctb8"] Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.167574 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-db-create-fcl2h"] Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.173694 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-20fe-account-create-rvj7f"] Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.179741 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila20fe-account-delete-sctb8"] Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.336519 4948 generic.go:334] "Generic (PLEG): container finished" podID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerID="92d0685a3cd38485807b86be49b0f2a64504b1722584321fa0a897e865fe1a3e" exitCode=0 Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.336655 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" event={"ID":"83e06745-7dbe-41e5-b492-6a176c6e579f","Type":"ContainerDied","Data":"92d0685a3cd38485807b86be49b0f2a64504b1722584321fa0a897e865fe1a3e"} Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.339162 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="841d44ca75460f2f66ef03f37e21f8a45982d679a006bd7cc0e474aa788b322a" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.339226 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/manila20fe-account-delete-sctb8" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.564396 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.672445 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-scripts\") pod \"83e06745-7dbe-41e5-b492-6a176c6e579f\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.672616 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data-custom\") pod \"83e06745-7dbe-41e5-b492-6a176c6e579f\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.672718 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data\") pod \"83e06745-7dbe-41e5-b492-6a176c6e579f\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.672804 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e06745-7dbe-41e5-b492-6a176c6e579f-etc-machine-id\") pod \"83e06745-7dbe-41e5-b492-6a176c6e579f\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.672879 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jckpc\" (UniqueName: \"kubernetes.io/projected/83e06745-7dbe-41e5-b492-6a176c6e579f-kube-api-access-jckpc\") pod \"83e06745-7dbe-41e5-b492-6a176c6e579f\" (UID: \"83e06745-7dbe-41e5-b492-6a176c6e579f\") " Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.674009 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/83e06745-7dbe-41e5-b492-6a176c6e579f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "83e06745-7dbe-41e5-b492-6a176c6e579f" (UID: "83e06745-7dbe-41e5-b492-6a176c6e579f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.677452 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83e06745-7dbe-41e5-b492-6a176c6e579f-kube-api-access-jckpc" (OuterVolumeSpecName: "kube-api-access-jckpc") pod "83e06745-7dbe-41e5-b492-6a176c6e579f" (UID: "83e06745-7dbe-41e5-b492-6a176c6e579f"). InnerVolumeSpecName "kube-api-access-jckpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.677609 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-scripts" (OuterVolumeSpecName: "scripts") pod "83e06745-7dbe-41e5-b492-6a176c6e579f" (UID: "83e06745-7dbe-41e5-b492-6a176c6e579f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.684303 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "83e06745-7dbe-41e5-b492-6a176c6e579f" (UID: "83e06745-7dbe-41e5-b492-6a176c6e579f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.736052 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data" (OuterVolumeSpecName: "config-data") pod "83e06745-7dbe-41e5-b492-6a176c6e579f" (UID: "83e06745-7dbe-41e5-b492-6a176c6e579f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.774820 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e06745-7dbe-41e5-b492-6a176c6e579f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.774847 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jckpc\" (UniqueName: \"kubernetes.io/projected/83e06745-7dbe-41e5-b492-6a176c6e579f-kube-api-access-jckpc\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.774858 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.774867 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.774875 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e06745-7dbe-41e5-b492-6a176c6e579f-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.776587 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" path="/var/lib/kubelet/pods/002a2e38-d4b9-4c13-bde1-5b01261fe21d/volumes" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.778080 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="795a2378-eed3-4d54-ae4f-0e73ae06efe9" path="/var/lib/kubelet/pods/795a2378-eed3-4d54-ae4f-0e73ae06efe9/volumes" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.779127 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6fa8999-034c-434a-8003-e03df12a0d32" path="/var/lib/kubelet/pods/d6fa8999-034c-434a-8003-e03df12a0d32/volumes" Nov 22 05:11:57 crc kubenswrapper[4948]: I1122 05:11:57.780177 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddfc3408-4389-4f52-8658-9c239dd9155c" path="/var/lib/kubelet/pods/ddfc3408-4389-4f52-8658-9c239dd9155c/volumes" Nov 22 05:11:58 crc kubenswrapper[4948]: I1122 05:11:58.352345 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/manila-scheduler-0" 
event={"ID":"83e06745-7dbe-41e5-b492-6a176c6e579f","Type":"ContainerDied","Data":"608fa0006a86333dd7c4c82d5461b75986c91926ccc8d1cea2422c437db60805"} Nov 22 05:11:58 crc kubenswrapper[4948]: I1122 05:11:58.352422 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/manila-scheduler-0" Nov 22 05:11:58 crc kubenswrapper[4948]: I1122 05:11:58.352434 4948 scope.go:117] "RemoveContainer" containerID="e980ff13a6f51ba38cd9acd76625d5e28e66d00a586b5e0a3b913ae75135e51c" Nov 22 05:11:58 crc kubenswrapper[4948]: I1122 05:11:58.384851 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:11:58 crc kubenswrapper[4948]: I1122 05:11:58.394127 4948 scope.go:117] "RemoveContainer" containerID="92d0685a3cd38485807b86be49b0f2a64504b1722584321fa0a897e865fe1a3e" Nov 22 05:11:58 crc kubenswrapper[4948]: I1122 05:11:58.397308 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/manila-scheduler-0"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.772690 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83e06745-7dbe-41e5-b492-6a176c6e579f" path="/var/lib/kubelet/pods/83e06745-7dbe-41e5-b492-6a176c6e579f/volumes" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.789995 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.790083 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.790134 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.790838 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"23c94a6fcacee9c3faebd2019427dd27c2a4acb1f102d2cd48e6ac38c0f38971"} pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.790904 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" containerID="cri-o://23c94a6fcacee9c3faebd2019427dd27c2a4acb1f102d2cd48e6ac38c0f38971" gracePeriod=600 Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.871725 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/keystone-db-sync-ngm5n"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.877293 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/keystone-bootstrap-68cnx"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.883898 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["manila-kuttl-tests/keystone-db-sync-ngm5n"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.887934 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/keystone-bootstrap-68cnx"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.892537 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/keystone-66bcbf78b6-ndxxc"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.892860 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" podUID="271e3d90-f82c-4001-8df0-acf407e4743a" containerName="keystone-api" containerID="cri-o://e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa" gracePeriod=30 Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.920852 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm"] Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.921239 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddfc3408-4389-4f52-8658-9c239dd9155c" containerName="mariadb-account-delete" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.921352 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddfc3408-4389-4f52-8658-9c239dd9155c" containerName="mariadb-account-delete" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.921407 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerName="kube-rbac-proxy" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.921453 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerName="kube-rbac-proxy" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.921517 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerName="registry-server" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.921572 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerName="registry-server" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.921623 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45443cdd-98a8-4fc6-bc0e-d621318951ad" containerName="registry-server" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.921668 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="45443cdd-98a8-4fc6-bc0e-d621318951ad" containerName="registry-server" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.921716 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerName="manager" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.921761 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerName="manager" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.921814 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerName="extract-content" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.921869 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerName="extract-content" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.921935 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerName="manila-api" Nov 22 05:11:59 crc kubenswrapper[4948]: 
I1122 05:11:59.921984 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerName="manila-api" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.922063 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerName="probe" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.922131 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerName="probe" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.922204 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerName="probe" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.922270 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerName="probe" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.922344 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerName="manila-api-log" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.922415 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerName="manila-api-log" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.922529 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerName="manila-scheduler" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.922608 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerName="manila-scheduler" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.922701 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerName="manila-share" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.922774 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerName="manila-share" Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.922950 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerName="extract-utilities" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923027 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerName="extract-utilities" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923255 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerName="manila-scheduler" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923352 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="3adc4805-aba7-4719-9f09-6b54ade74c12" containerName="registry-server" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923435 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e06745-7dbe-41e5-b492-6a176c6e579f" containerName="probe" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923510 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerName="probe" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923608 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerName="manager" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923692 4948 
memory_manager.go:354] "RemoveStaleState removing state" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerName="manila-api-log" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923744 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="45443cdd-98a8-4fc6-bc0e-d621318951ad" containerName="registry-server" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923790 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4704d599-bafd-404a-96b3-9cf06bf0658f" containerName="kube-rbac-proxy" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923838 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="002a2e38-d4b9-4c13-bde1-5b01261fe21d" containerName="manila-api" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923884 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c59af44-b4c5-4c5f-ac58-a256515aa02c" containerName="manila-share" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.923939 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddfc3408-4389-4f52-8658-9c239dd9155c" containerName="mariadb-account-delete" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.924401 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm" Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.942008 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.958501 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/keystone-db-create-flbjx"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.966955 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/keystone-db-create-flbjx"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.982818 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/keystone-b2aa-account-create-g94z9"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.986760 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/keystone-b2aa-account-create-g94z9"] Nov 22 05:11:59 crc kubenswrapper[4948]: I1122 05:11:59.990487 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm"] Nov 22 05:11:59 crc kubenswrapper[4948]: E1122 05:11:59.990931 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-fwkdt], unattached volumes=[], failed to process volumes=[kube-api-access-fwkdt]: context canceled" pod="manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm" podUID="fefd8777-4031-4cd7-81a4-a1f8cec10eed" Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.012108 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwkdt\" (UniqueName: \"kubernetes.io/projected/fefd8777-4031-4cd7-81a4-a1f8cec10eed-kube-api-access-fwkdt\") pod \"keystoneb2aa-account-delete-mz9nm\" (UID: \"fefd8777-4031-4cd7-81a4-a1f8cec10eed\") " pod="manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm" Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.114073 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwkdt\" (UniqueName: \"kubernetes.io/projected/fefd8777-4031-4cd7-81a4-a1f8cec10eed-kube-api-access-fwkdt\") pod \"keystoneb2aa-account-delete-mz9nm\" (UID: 
\"fefd8777-4031-4cd7-81a4-a1f8cec10eed\") " pod="manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm" Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.133385 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwkdt\" (UniqueName: \"kubernetes.io/projected/fefd8777-4031-4cd7-81a4-a1f8cec10eed-kube-api-access-fwkdt\") pod \"keystoneb2aa-account-delete-mz9nm\" (UID: \"fefd8777-4031-4cd7-81a4-a1f8cec10eed\") " pod="manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm" Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.374986 4948 generic.go:334] "Generic (PLEG): container finished" podID="126f010b-a640-4133-b63f-d2976da99215" containerID="23c94a6fcacee9c3faebd2019427dd27c2a4acb1f102d2cd48e6ac38c0f38971" exitCode=0 Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.375074 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm" Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.375235 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerDied","Data":"23c94a6fcacee9c3faebd2019427dd27c2a4acb1f102d2cd48e6ac38c0f38971"} Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.375275 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754"} Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.375292 4948 scope.go:117] "RemoveContainer" containerID="a0943f4e445f12f6dc5d4e849eee67dd81bfc6359d81ae42402f37fb54747939" Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.392789 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm" Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.416982 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwkdt\" (UniqueName: \"kubernetes.io/projected/fefd8777-4031-4cd7-81a4-a1f8cec10eed-kube-api-access-fwkdt\") pod \"fefd8777-4031-4cd7-81a4-a1f8cec10eed\" (UID: \"fefd8777-4031-4cd7-81a4-a1f8cec10eed\") " Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.420851 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fefd8777-4031-4cd7-81a4-a1f8cec10eed-kube-api-access-fwkdt" (OuterVolumeSpecName: "kube-api-access-fwkdt") pod "fefd8777-4031-4cd7-81a4-a1f8cec10eed" (UID: "fefd8777-4031-4cd7-81a4-a1f8cec10eed"). InnerVolumeSpecName "kube-api-access-fwkdt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.519080 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwkdt\" (UniqueName: \"kubernetes.io/projected/fefd8777-4031-4cd7-81a4-a1f8cec10eed-kube-api-access-fwkdt\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.793194 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/openstack-galera-0"] Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.808271 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/openstack-galera-1"] Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.813869 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/openstack-galera-2"] Nov 22 05:12:00 crc kubenswrapper[4948]: I1122 05:12:00.944473 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/openstack-galera-2" podUID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerName="galera" containerID="cri-o://8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624" gracePeriod=30 Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.388078 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm" Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.445436 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm"] Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.457893 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/keystoneb2aa-account-delete-mz9nm"] Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.463665 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/memcached-0"] Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.463906 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/memcached-0" podUID="2605c37d-54b8-4424-8068-ff0350f44403" containerName="memcached" containerID="cri-o://c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4" gracePeriod=30 Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.773289 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4" path="/var/lib/kubelet/pods/3b56ee13-cdc0-403c-8579-c5fbe2bcb4f4/volumes" Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.774891 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68eb22ec-e675-4253-b5fa-61da8b4d5050" path="/var/lib/kubelet/pods/68eb22ec-e675-4253-b5fa-61da8b4d5050/volumes" Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.775922 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c11a057-0fc1-4b03-83e3-f8be2decfcc5" path="/var/lib/kubelet/pods/6c11a057-0fc1-4b03-83e3-f8be2decfcc5/volumes" Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.776989 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d36b03e3-7b82-45ae-8ba7-59af108509b7" path="/var/lib/kubelet/pods/d36b03e3-7b82-45ae-8ba7-59af108509b7/volumes" Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.778802 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fefd8777-4031-4cd7-81a4-a1f8cec10eed" path="/var/lib/kubelet/pods/fefd8777-4031-4cd7-81a4-a1f8cec10eed/volumes" Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.959160 
4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["manila-kuttl-tests/rabbitmq-server-0"] Nov 22 05:12:01 crc kubenswrapper[4948]: I1122 05:12:01.983809 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.050114 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-kolla-config\") pod \"9b8859c7-31db-4617-acb7-096f251ea3ae\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.050832 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-operator-scripts\") pod \"9b8859c7-31db-4617-acb7-096f251ea3ae\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.050902 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"9b8859c7-31db-4617-acb7-096f251ea3ae\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.050953 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-default\") pod \"9b8859c7-31db-4617-acb7-096f251ea3ae\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.051017 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-generated\") pod \"9b8859c7-31db-4617-acb7-096f251ea3ae\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.051144 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tq8ck\" (UniqueName: \"kubernetes.io/projected/9b8859c7-31db-4617-acb7-096f251ea3ae-kube-api-access-tq8ck\") pod \"9b8859c7-31db-4617-acb7-096f251ea3ae\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.051192 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/9b8859c7-31db-4617-acb7-096f251ea3ae-secrets\") pod \"9b8859c7-31db-4617-acb7-096f251ea3ae\" (UID: \"9b8859c7-31db-4617-acb7-096f251ea3ae\") " Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.053891 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "9b8859c7-31db-4617-acb7-096f251ea3ae" (UID: "9b8859c7-31db-4617-acb7-096f251ea3ae"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.053968 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "9b8859c7-31db-4617-acb7-096f251ea3ae" (UID: "9b8859c7-31db-4617-acb7-096f251ea3ae"). 
InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.054336 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "9b8859c7-31db-4617-acb7-096f251ea3ae" (UID: "9b8859c7-31db-4617-acb7-096f251ea3ae"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.054921 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9b8859c7-31db-4617-acb7-096f251ea3ae" (UID: "9b8859c7-31db-4617-acb7-096f251ea3ae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.059255 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b8859c7-31db-4617-acb7-096f251ea3ae-kube-api-access-tq8ck" (OuterVolumeSpecName: "kube-api-access-tq8ck") pod "9b8859c7-31db-4617-acb7-096f251ea3ae" (UID: "9b8859c7-31db-4617-acb7-096f251ea3ae"). InnerVolumeSpecName "kube-api-access-tq8ck". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.059502 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b8859c7-31db-4617-acb7-096f251ea3ae-secrets" (OuterVolumeSpecName: "secrets") pod "9b8859c7-31db-4617-acb7-096f251ea3ae" (UID: "9b8859c7-31db-4617-acb7-096f251ea3ae"). InnerVolumeSpecName "secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.064969 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "mysql-db") pod "9b8859c7-31db-4617-acb7-096f251ea3ae" (UID: "9b8859c7-31db-4617-acb7-096f251ea3ae"). InnerVolumeSpecName "local-storage01-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.153042 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tq8ck\" (UniqueName: \"kubernetes.io/projected/9b8859c7-31db-4617-acb7-096f251ea3ae-kube-api-access-tq8ck\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.153430 4948 reconciler_common.go:293] "Volume detached for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/9b8859c7-31db-4617-acb7-096f251ea3ae-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.153442 4948 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.153450 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.153689 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.153703 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.153713 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9b8859c7-31db-4617-acb7-096f251ea3ae-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.172853 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.255010 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.334583 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/rabbitmq-server-0"] Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.395340 4948 generic.go:334] "Generic (PLEG): container finished" podID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerID="8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624" exitCode=0 Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.395391 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/openstack-galera-2" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.395438 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-2" event={"ID":"9b8859c7-31db-4617-acb7-096f251ea3ae","Type":"ContainerDied","Data":"8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624"} Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.395477 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-2" event={"ID":"9b8859c7-31db-4617-acb7-096f251ea3ae","Type":"ContainerDied","Data":"17fc7c80ae8732c48dbf9375d6fbd976e035f5b6bcef093ee86c2f3ffd899533"} Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.395495 4948 scope.go:117] "RemoveContainer" containerID="8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.416786 4948 scope.go:117] "RemoveContainer" containerID="0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.430174 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/rabbitmq-server-0" podUID="8c58a5c1-3e51-491f-af14-9a795b1bdc3c" containerName="rabbitmq" containerID="cri-o://6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472" gracePeriod=604800 Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.433859 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/openstack-galera-2"] Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.437437 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/openstack-galera-2"] Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.443551 4948 scope.go:117] "RemoveContainer" containerID="8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624" Nov 22 05:12:02 crc kubenswrapper[4948]: E1122 05:12:02.445159 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624\": container with ID starting with 8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624 not found: ID does not exist" containerID="8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.445202 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624"} err="failed to get container status \"8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624\": rpc error: code = NotFound desc = could not find container \"8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624\": container with ID starting with 8878ab6893e0f49b27c464af240bd296a26fad1f9df16c290aea3127713ca624 not found: ID does not exist" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.445233 4948 scope.go:117] "RemoveContainer" containerID="0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb" Nov 22 05:12:02 crc kubenswrapper[4948]: E1122 05:12:02.445684 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb\": container with ID starting with 0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb not found: ID does not exist" 
containerID="0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.445761 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb"} err="failed to get container status \"0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb\": rpc error: code = NotFound desc = could not find container \"0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb\": container with ID starting with 0b99312f09ca914bc8ac4f54ce75800413036686963b8bdbbf7085bbf25f14eb not found: ID does not exist" Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.853189 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/ceph"] Nov 22 05:12:02 crc kubenswrapper[4948]: I1122 05:12:02.853442 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/ceph" podUID="ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" containerName="ceph" containerID="cri-o://91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba" gracePeriod=30 Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.001742 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/openstack-galera-1" podUID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" containerName="galera" containerID="cri-o://e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977" gracePeriod=28 Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.163394 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/memcached-0" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.274312 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-config-data\") pod \"2605c37d-54b8-4424-8068-ff0350f44403\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.274349 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kv6hh\" (UniqueName: \"kubernetes.io/projected/2605c37d-54b8-4424-8068-ff0350f44403-kube-api-access-kv6hh\") pod \"2605c37d-54b8-4424-8068-ff0350f44403\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.274496 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-kolla-config\") pod \"2605c37d-54b8-4424-8068-ff0350f44403\" (UID: \"2605c37d-54b8-4424-8068-ff0350f44403\") " Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.275681 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "2605c37d-54b8-4424-8068-ff0350f44403" (UID: "2605c37d-54b8-4424-8068-ff0350f44403"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.276280 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-config-data" (OuterVolumeSpecName: "config-data") pod "2605c37d-54b8-4424-8068-ff0350f44403" (UID: "2605c37d-54b8-4424-8068-ff0350f44403"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.296980 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2605c37d-54b8-4424-8068-ff0350f44403-kube-api-access-kv6hh" (OuterVolumeSpecName: "kube-api-access-kv6hh") pod "2605c37d-54b8-4424-8068-ff0350f44403" (UID: "2605c37d-54b8-4424-8068-ff0350f44403"). InnerVolumeSpecName "kube-api-access-kv6hh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.376979 4948 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.377044 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2605c37d-54b8-4424-8068-ff0350f44403-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.377063 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kv6hh\" (UniqueName: \"kubernetes.io/projected/2605c37d-54b8-4424-8068-ff0350f44403-kube-api-access-kv6hh\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.388860 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.403882 4948 generic.go:334] "Generic (PLEG): container finished" podID="271e3d90-f82c-4001-8df0-acf407e4743a" containerID="e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa" exitCode=0 Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.403948 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" event={"ID":"271e3d90-f82c-4001-8df0-acf407e4743a","Type":"ContainerDied","Data":"e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa"} Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.403975 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" event={"ID":"271e3d90-f82c-4001-8df0-acf407e4743a","Type":"ContainerDied","Data":"591c4320f471cb5b49ddb2425de1aa3c7b669555a68102b99cce6fb8dfeecbca"} Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.403998 4948 scope.go:117] "RemoveContainer" containerID="e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.404112 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/keystone-66bcbf78b6-ndxxc" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.413885 4948 generic.go:334] "Generic (PLEG): container finished" podID="2605c37d-54b8-4424-8068-ff0350f44403" containerID="c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4" exitCode=0 Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.413940 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/memcached-0" event={"ID":"2605c37d-54b8-4424-8068-ff0350f44403","Type":"ContainerDied","Data":"c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4"} Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.413961 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/memcached-0" event={"ID":"2605c37d-54b8-4424-8068-ff0350f44403","Type":"ContainerDied","Data":"f4acaf537dd3fa6937e18f2af49d5a62aa81e38f7241d065138e8dabb59d657c"} Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.413944 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/memcached-0" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.447164 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/memcached-0"] Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.447754 4948 scope.go:117] "RemoveContainer" containerID="e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa" Nov 22 05:12:03 crc kubenswrapper[4948]: E1122 05:12:03.448225 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa\": container with ID starting with e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa not found: ID does not exist" containerID="e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.448259 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa"} err="failed to get container status \"e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa\": rpc error: code = NotFound desc = could not find container \"e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa\": container with ID starting with e370cc9fc736684c6637a27ce07c608a2c1ce534d19f82672f46b8c282e23dfa not found: ID does not exist" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.448278 4948 scope.go:117] "RemoveContainer" containerID="c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.448975 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/memcached-0"] Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.461976 4948 scope.go:117] "RemoveContainer" containerID="c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4" Nov 22 05:12:03 crc kubenswrapper[4948]: E1122 05:12:03.462453 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4\": container with ID starting with c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4 not found: ID does not exist" containerID="c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 
05:12:03.462542 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4"} err="failed to get container status \"c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4\": rpc error: code = NotFound desc = could not find container \"c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4\": container with ID starting with c092ce5a790574637b4f5efda1dd52d2e34f035cca39f3b371191f5a859a13a4 not found: ID does not exist" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.478904 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svlzv\" (UniqueName: \"kubernetes.io/projected/271e3d90-f82c-4001-8df0-acf407e4743a-kube-api-access-svlzv\") pod \"271e3d90-f82c-4001-8df0-acf407e4743a\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.478999 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-scripts\") pod \"271e3d90-f82c-4001-8df0-acf407e4743a\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.479077 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-config-data\") pod \"271e3d90-f82c-4001-8df0-acf407e4743a\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.479122 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-credential-keys\") pod \"271e3d90-f82c-4001-8df0-acf407e4743a\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.479176 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-fernet-keys\") pod \"271e3d90-f82c-4001-8df0-acf407e4743a\" (UID: \"271e3d90-f82c-4001-8df0-acf407e4743a\") " Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.483578 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "271e3d90-f82c-4001-8df0-acf407e4743a" (UID: "271e3d90-f82c-4001-8df0-acf407e4743a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.484270 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/271e3d90-f82c-4001-8df0-acf407e4743a-kube-api-access-svlzv" (OuterVolumeSpecName: "kube-api-access-svlzv") pod "271e3d90-f82c-4001-8df0-acf407e4743a" (UID: "271e3d90-f82c-4001-8df0-acf407e4743a"). InnerVolumeSpecName "kube-api-access-svlzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.484700 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "271e3d90-f82c-4001-8df0-acf407e4743a" (UID: "271e3d90-f82c-4001-8df0-acf407e4743a"). 
InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.484806 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-scripts" (OuterVolumeSpecName: "scripts") pod "271e3d90-f82c-4001-8df0-acf407e4743a" (UID: "271e3d90-f82c-4001-8df0-acf407e4743a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.498073 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-config-data" (OuterVolumeSpecName: "config-data") pod "271e3d90-f82c-4001-8df0-acf407e4743a" (UID: "271e3d90-f82c-4001-8df0-acf407e4743a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.580868 4948 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.580902 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svlzv\" (UniqueName: \"kubernetes.io/projected/271e3d90-f82c-4001-8df0-acf407e4743a-kube-api-access-svlzv\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.580911 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.580921 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.580929 4948 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/271e3d90-f82c-4001-8df0-acf407e4743a-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.752603 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/keystone-66bcbf78b6-ndxxc"] Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.767290 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2605c37d-54b8-4424-8068-ff0350f44403" path="/var/lib/kubelet/pods/2605c37d-54b8-4424-8068-ff0350f44403/volumes" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.768281 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b8859c7-31db-4617-acb7-096f251ea3ae" path="/var/lib/kubelet/pods/9b8859c7-31db-4617-acb7-096f251ea3ae/volumes" Nov 22 05:12:03 crc kubenswrapper[4948]: I1122 05:12:03.768965 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/keystone-66bcbf78b6-ndxxc"] Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.035642 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/rabbitmq-server-0" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.092616 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-erlang-cookie-secret\") pod \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.092736 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-pod-info\") pod \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.092778 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dlbn\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-kube-api-access-7dlbn\") pod \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.092807 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-plugins-conf\") pod \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.092836 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-confd\") pod \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.092878 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-plugins\") pod \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.092990 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\") pod \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.093034 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-erlang-cookie\") pod \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\" (UID: \"8c58a5c1-3e51-491f-af14-9a795b1bdc3c\") " Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.093370 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "8c58a5c1-3e51-491f-af14-9a795b1bdc3c" (UID: "8c58a5c1-3e51-491f-af14-9a795b1bdc3c"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.093486 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.093694 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "8c58a5c1-3e51-491f-af14-9a795b1bdc3c" (UID: "8c58a5c1-3e51-491f-af14-9a795b1bdc3c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.093784 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "8c58a5c1-3e51-491f-af14-9a795b1bdc3c" (UID: "8c58a5c1-3e51-491f-af14-9a795b1bdc3c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.100707 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-pod-info" (OuterVolumeSpecName: "pod-info") pod "8c58a5c1-3e51-491f-af14-9a795b1bdc3c" (UID: "8c58a5c1-3e51-491f-af14-9a795b1bdc3c"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.106586 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "8c58a5c1-3e51-491f-af14-9a795b1bdc3c" (UID: "8c58a5c1-3e51-491f-af14-9a795b1bdc3c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.113047 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a" (OuterVolumeSpecName: "persistence") pod "8c58a5c1-3e51-491f-af14-9a795b1bdc3c" (UID: "8c58a5c1-3e51-491f-af14-9a795b1bdc3c"). InnerVolumeSpecName "pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.117955 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-kube-api-access-7dlbn" (OuterVolumeSpecName: "kube-api-access-7dlbn") pod "8c58a5c1-3e51-491f-af14-9a795b1bdc3c" (UID: "8c58a5c1-3e51-491f-af14-9a795b1bdc3c"). InnerVolumeSpecName "kube-api-access-7dlbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.179668 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "8c58a5c1-3e51-491f-af14-9a795b1bdc3c" (UID: "8c58a5c1-3e51-491f-af14-9a795b1bdc3c"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.194487 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.194514 4948 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.194527 4948 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-pod-info\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.194536 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dlbn\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-kube-api-access-7dlbn\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.194546 4948 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.194554 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8c58a5c1-3e51-491f-af14-9a795b1bdc3c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.194586 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\") on node \"crc\" " Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.207110 4948 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.207519 4948 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a") on node "crc" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.295927 4948 reconciler_common.go:293] "Volume detached for volume \"pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b18c6df9-3e24-4d8d-b535-d13b9318621a\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.425035 4948 generic.go:334] "Generic (PLEG): container finished" podID="8c58a5c1-3e51-491f-af14-9a795b1bdc3c" containerID="6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472" exitCode=0 Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.425097 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/rabbitmq-server-0" event={"ID":"8c58a5c1-3e51-491f-af14-9a795b1bdc3c","Type":"ContainerDied","Data":"6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472"} Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.425517 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/rabbitmq-server-0" event={"ID":"8c58a5c1-3e51-491f-af14-9a795b1bdc3c","Type":"ContainerDied","Data":"717f73d5205e78753ffb71cc1a1d840b016dab25426404b920eb47bfa60b7109"} Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.425138 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/rabbitmq-server-0" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.425592 4948 scope.go:117] "RemoveContainer" containerID="6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.453509 4948 scope.go:117] "RemoveContainer" containerID="8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.481347 4948 scope.go:117] "RemoveContainer" containerID="6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472" Nov 22 05:12:04 crc kubenswrapper[4948]: E1122 05:12:04.481727 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472\": container with ID starting with 6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472 not found: ID does not exist" containerID="6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.481772 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472"} err="failed to get container status \"6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472\": rpc error: code = NotFound desc = could not find container \"6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472\": container with ID starting with 6fe9df298460244e4a7b005d56bd772ddc14170e08470b5c6572a6e23d3c9472 not found: ID does not exist" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.481797 4948 scope.go:117] "RemoveContainer" containerID="8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4" Nov 22 05:12:04 crc kubenswrapper[4948]: E1122 05:12:04.481992 4948 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4\": container with ID starting with 8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4 not found: ID does not exist" containerID="8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.482020 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4"} err="failed to get container status \"8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4\": rpc error: code = NotFound desc = could not find container \"8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4\": container with ID starting with 8aba2b8670bc66ee87da608dac57ef2cca13620e035b70109faa0a66df9a7aa4 not found: ID does not exist" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.488535 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/rabbitmq-server-0"] Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.494087 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/rabbitmq-server-0"] Nov 22 05:12:04 crc kubenswrapper[4948]: E1122 05:12:04.850628 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977 is running failed: container process not found" containerID="e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 22 05:12:04 crc kubenswrapper[4948]: E1122 05:12:04.851413 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977 is running failed: container process not found" containerID="e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 22 05:12:04 crc kubenswrapper[4948]: E1122 05:12:04.851685 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977 is running failed: container process not found" containerID="e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 22 05:12:04 crc kubenswrapper[4948]: E1122 05:12:04.851725 4948 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977 is running failed: container process not found" probeType="Readiness" pod="manila-kuttl-tests/openstack-galera-1" podUID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" containerName="galera" Nov 22 05:12:04 crc kubenswrapper[4948]: I1122 05:12:04.982970 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.005278 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kolla-config\") pod \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.005449 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-operator-scripts\") pod \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.005564 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.005668 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-secrets\") pod \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.005732 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-default\") pod \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.005788 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-generated\") pod \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.005859 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62fpg\" (UniqueName: \"kubernetes.io/projected/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kube-api-access-62fpg\") pod \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\" (UID: \"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0\") " Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.006432 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" (UID: "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.006531 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" (UID: "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0"). InnerVolumeSpecName "config-data-default". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.006663 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" (UID: "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.006697 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" (UID: "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.012030 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-secrets" (OuterVolumeSpecName: "secrets") pod "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" (UID: "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0"). InnerVolumeSpecName "secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.012125 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kube-api-access-62fpg" (OuterVolumeSpecName: "kube-api-access-62fpg") pod "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" (UID: "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0"). InnerVolumeSpecName "kube-api-access-62fpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.019550 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "mysql-db") pod "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" (UID: "c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0"). InnerVolumeSpecName "local-storage02-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.054211 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="manila-kuttl-tests/openstack-galera-0" podUID="edc1757b-b350-47aa-94bd-a3f479b8d0ce" containerName="galera" containerID="cri-o://db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed" gracePeriod=26 Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.107199 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.107261 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.107271 4948 reconciler_common.go:293] "Volume detached for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.107284 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.107294 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.107304 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62fpg\" (UniqueName: \"kubernetes.io/projected/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kube-api-access-62fpg\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.107312 4948 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.117943 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.208178 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.436629 4948 generic.go:334] "Generic (PLEG): container finished" podID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" containerID="e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977" exitCode=0 Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.436678 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/openstack-galera-1" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.436683 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-1" event={"ID":"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0","Type":"ContainerDied","Data":"e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977"} Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.436812 4948 scope.go:117] "RemoveContainer" containerID="e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.436940 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-1" event={"ID":"c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0","Type":"ContainerDied","Data":"affcd4ee5d462d00d5151c04bd6b2ee25cc193d1a1e4b3f9c1431769246f98da"} Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.478519 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/openstack-galera-1"] Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.480576 4948 scope.go:117] "RemoveContainer" containerID="ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.486205 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/openstack-galera-1"] Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.511903 4948 scope.go:117] "RemoveContainer" containerID="e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977" Nov 22 05:12:05 crc kubenswrapper[4948]: E1122 05:12:05.512939 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977\": container with ID starting with e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977 not found: ID does not exist" containerID="e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.512972 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977"} err="failed to get container status \"e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977\": rpc error: code = NotFound desc = could not find container \"e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977\": container with ID starting with e46b2123b8f96ab80a30d29b2785560b0758026d754879a30606bc0b7bf6c977 not found: ID does not exist" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.512997 4948 scope.go:117] "RemoveContainer" containerID="ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51" Nov 22 05:12:05 crc kubenswrapper[4948]: E1122 05:12:05.513826 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51\": container with ID starting with ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51 not found: ID does not exist" containerID="ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.513866 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51"} err="failed to get container status 
\"ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51\": rpc error: code = NotFound desc = could not find container \"ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51\": container with ID starting with ba4e34571555520babafb91e5f76d53d1aa99a153c50c08a26bbf55d92f32b51 not found: ID does not exist" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.766992 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="271e3d90-f82c-4001-8df0-acf407e4743a" path="/var/lib/kubelet/pods/271e3d90-f82c-4001-8df0-acf407e4743a/volumes" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.768302 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c58a5c1-3e51-491f-af14-9a795b1bdc3c" path="/var/lib/kubelet/pods/8c58a5c1-3e51-491f-af14-9a795b1bdc3c/volumes" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.770085 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" path="/var/lib/kubelet/pods/c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0/volumes" Nov 22 05:12:05 crc kubenswrapper[4948]: I1122 05:12:05.969489 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.124498 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.124574 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ct8tc\" (UniqueName: \"kubernetes.io/projected/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kube-api-access-ct8tc\") pod \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.124608 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/edc1757b-b350-47aa-94bd-a3f479b8d0ce-secrets\") pod \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.124659 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-default\") pod \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.124711 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kolla-config\") pod \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.124754 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-operator-scripts\") pod \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.124820 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-generated\") pod \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\" (UID: \"edc1757b-b350-47aa-94bd-a3f479b8d0ce\") " Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.125826 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "edc1757b-b350-47aa-94bd-a3f479b8d0ce" (UID: "edc1757b-b350-47aa-94bd-a3f479b8d0ce"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.126434 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "edc1757b-b350-47aa-94bd-a3f479b8d0ce" (UID: "edc1757b-b350-47aa-94bd-a3f479b8d0ce"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.126958 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "edc1757b-b350-47aa-94bd-a3f479b8d0ce" (UID: "edc1757b-b350-47aa-94bd-a3f479b8d0ce"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.127652 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "edc1757b-b350-47aa-94bd-a3f479b8d0ce" (UID: "edc1757b-b350-47aa-94bd-a3f479b8d0ce"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.135795 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kube-api-access-ct8tc" (OuterVolumeSpecName: "kube-api-access-ct8tc") pod "edc1757b-b350-47aa-94bd-a3f479b8d0ce" (UID: "edc1757b-b350-47aa-94bd-a3f479b8d0ce"). InnerVolumeSpecName "kube-api-access-ct8tc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.136227 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edc1757b-b350-47aa-94bd-a3f479b8d0ce-secrets" (OuterVolumeSpecName: "secrets") pod "edc1757b-b350-47aa-94bd-a3f479b8d0ce" (UID: "edc1757b-b350-47aa-94bd-a3f479b8d0ce"). InnerVolumeSpecName "secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.149281 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "mysql-db") pod "edc1757b-b350-47aa-94bd-a3f479b8d0ce" (UID: "edc1757b-b350-47aa-94bd-a3f479b8d0ce"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.226091 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.226180 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.226202 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ct8tc\" (UniqueName: \"kubernetes.io/projected/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kube-api-access-ct8tc\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.226220 4948 reconciler_common.go:293] "Volume detached for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/edc1757b-b350-47aa-94bd-a3f479b8d0ce-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.226238 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.226258 4948 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.226275 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edc1757b-b350-47aa-94bd-a3f479b8d0ce-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.239177 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.327875 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.452582 4948 generic.go:334] "Generic (PLEG): container finished" podID="edc1757b-b350-47aa-94bd-a3f479b8d0ce" containerID="db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed" exitCode=0 Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.452660 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-0" event={"ID":"edc1757b-b350-47aa-94bd-a3f479b8d0ce","Type":"ContainerDied","Data":"db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed"} Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.452706 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/openstack-galera-0" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.453217 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/openstack-galera-0" event={"ID":"edc1757b-b350-47aa-94bd-a3f479b8d0ce","Type":"ContainerDied","Data":"1a1b7b694c3b4174f83dc51b00a2e61dd1e90dae1fb82b11281b9babc97d2011"} Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.453272 4948 scope.go:117] "RemoveContainer" containerID="db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.494069 4948 scope.go:117] "RemoveContainer" containerID="02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.504492 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/openstack-galera-0"] Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.509404 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/openstack-galera-0"] Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.524382 4948 scope.go:117] "RemoveContainer" containerID="db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed" Nov 22 05:12:06 crc kubenswrapper[4948]: E1122 05:12:06.525074 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed\": container with ID starting with db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed not found: ID does not exist" containerID="db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.525179 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed"} err="failed to get container status \"db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed\": rpc error: code = NotFound desc = could not find container \"db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed\": container with ID starting with db9d9c5fe62d6437c42583a506602164ac3a1547bd17eb65e3de949ebc2165ed not found: ID does not exist" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.525262 4948 scope.go:117] "RemoveContainer" containerID="02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3" Nov 22 05:12:06 crc kubenswrapper[4948]: E1122 05:12:06.526059 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3\": container with ID starting with 02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3 not found: ID does not exist" containerID="02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3" Nov 22 05:12:06 crc kubenswrapper[4948]: I1122 05:12:06.526116 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3"} err="failed to get container status \"02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3\": rpc error: code = NotFound desc = could not find container \"02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3\": container with ID starting with 02aed647777cae87f297e1dc6ade7103aa192b889d371738ddca7a7449454dd3 not found: ID does not exist" Nov 22 05:12:07 
crc kubenswrapper[4948]: I1122 05:12:07.790746 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edc1757b-b350-47aa-94bd-a3f479b8d0ce" path="/var/lib/kubelet/pods/edc1757b-b350-47aa-94bd-a3f479b8d0ce/volumes" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.464218 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9b6mf"] Nov 22 05:12:26 crc kubenswrapper[4948]: E1122 05:12:26.465140 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c58a5c1-3e51-491f-af14-9a795b1bdc3c" containerName="rabbitmq" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465157 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c58a5c1-3e51-491f-af14-9a795b1bdc3c" containerName="rabbitmq" Nov 22 05:12:26 crc kubenswrapper[4948]: E1122 05:12:26.465173 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="271e3d90-f82c-4001-8df0-acf407e4743a" containerName="keystone-api" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465181 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="271e3d90-f82c-4001-8df0-acf407e4743a" containerName="keystone-api" Nov 22 05:12:26 crc kubenswrapper[4948]: E1122 05:12:26.465189 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edc1757b-b350-47aa-94bd-a3f479b8d0ce" containerName="galera" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465197 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="edc1757b-b350-47aa-94bd-a3f479b8d0ce" containerName="galera" Nov 22 05:12:26 crc kubenswrapper[4948]: E1122 05:12:26.465214 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerName="galera" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465222 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerName="galera" Nov 22 05:12:26 crc kubenswrapper[4948]: E1122 05:12:26.465232 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerName="mysql-bootstrap" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465240 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerName="mysql-bootstrap" Nov 22 05:12:26 crc kubenswrapper[4948]: E1122 05:12:26.465252 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" containerName="mysql-bootstrap" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465260 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" containerName="mysql-bootstrap" Nov 22 05:12:26 crc kubenswrapper[4948]: E1122 05:12:26.465271 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c58a5c1-3e51-491f-af14-9a795b1bdc3c" containerName="setup-container" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465281 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c58a5c1-3e51-491f-af14-9a795b1bdc3c" containerName="setup-container" Nov 22 05:12:26 crc kubenswrapper[4948]: E1122 05:12:26.465295 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edc1757b-b350-47aa-94bd-a3f479b8d0ce" containerName="mysql-bootstrap" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465304 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="edc1757b-b350-47aa-94bd-a3f479b8d0ce" containerName="mysql-bootstrap" Nov 22 05:12:26 crc 
kubenswrapper[4948]: E1122 05:12:26.465320 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" containerName="galera" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465328 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" containerName="galera" Nov 22 05:12:26 crc kubenswrapper[4948]: E1122 05:12:26.465343 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2605c37d-54b8-4424-8068-ff0350f44403" containerName="memcached" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465351 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2605c37d-54b8-4424-8068-ff0350f44403" containerName="memcached" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465538 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="271e3d90-f82c-4001-8df0-acf407e4743a" containerName="keystone-api" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465558 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="edc1757b-b350-47aa-94bd-a3f479b8d0ce" containerName="galera" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465568 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b8859c7-31db-4617-acb7-096f251ea3ae" containerName="galera" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465584 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5a6d29f-cfab-405b-9b3f-d95e8a1bacb0" containerName="galera" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465597 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c58a5c1-3e51-491f-af14-9a795b1bdc3c" containerName="rabbitmq" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.465613 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="2605c37d-54b8-4424-8068-ff0350f44403" containerName="memcached" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.466584 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.484226 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9b6mf"] Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.648353 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr4vw\" (UniqueName: \"kubernetes.io/projected/a8c730c8-bf35-42f2-a30c-cf603523fdfc-kube-api-access-cr4vw\") pod \"certified-operators-9b6mf\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.648614 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-catalog-content\") pod \"certified-operators-9b6mf\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.648872 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-utilities\") pod \"certified-operators-9b6mf\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.749958 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-catalog-content\") pod \"certified-operators-9b6mf\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.750028 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-utilities\") pod \"certified-operators-9b6mf\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.750068 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr4vw\" (UniqueName: \"kubernetes.io/projected/a8c730c8-bf35-42f2-a30c-cf603523fdfc-kube-api-access-cr4vw\") pod \"certified-operators-9b6mf\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.751020 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-catalog-content\") pod \"certified-operators-9b6mf\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.751029 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-utilities\") pod \"certified-operators-9b6mf\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.783692 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-cr4vw\" (UniqueName: \"kubernetes.io/projected/a8c730c8-bf35-42f2-a30c-cf603523fdfc-kube-api-access-cr4vw\") pod \"certified-operators-9b6mf\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:26 crc kubenswrapper[4948]: I1122 05:12:26.792701 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:27 crc kubenswrapper[4948]: I1122 05:12:27.265039 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9b6mf"] Nov 22 05:12:27 crc kubenswrapper[4948]: W1122 05:12:27.276583 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8c730c8_bf35_42f2_a30c_cf603523fdfc.slice/crio-d947bb47d7c2fde73506128c2b99e19c8d1ec73fbf610996a7b63fe5bc5a6bb6 WatchSource:0}: Error finding container d947bb47d7c2fde73506128c2b99e19c8d1ec73fbf610996a7b63fe5bc5a6bb6: Status 404 returned error can't find the container with id d947bb47d7c2fde73506128c2b99e19c8d1ec73fbf610996a7b63fe5bc5a6bb6 Nov 22 05:12:27 crc kubenswrapper[4948]: I1122 05:12:27.634540 4948 generic.go:334] "Generic (PLEG): container finished" podID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerID="c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64" exitCode=0 Nov 22 05:12:27 crc kubenswrapper[4948]: I1122 05:12:27.634613 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b6mf" event={"ID":"a8c730c8-bf35-42f2-a30c-cf603523fdfc","Type":"ContainerDied","Data":"c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64"} Nov 22 05:12:27 crc kubenswrapper[4948]: I1122 05:12:27.634918 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b6mf" event={"ID":"a8c730c8-bf35-42f2-a30c-cf603523fdfc","Type":"ContainerStarted","Data":"d947bb47d7c2fde73506128c2b99e19c8d1ec73fbf610996a7b63fe5bc5a6bb6"} Nov 22 05:12:28 crc kubenswrapper[4948]: I1122 05:12:28.643309 4948 generic.go:334] "Generic (PLEG): container finished" podID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerID="a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c" exitCode=0 Nov 22 05:12:28 crc kubenswrapper[4948]: I1122 05:12:28.643625 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b6mf" event={"ID":"a8c730c8-bf35-42f2-a30c-cf603523fdfc","Type":"ContainerDied","Data":"a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c"} Nov 22 05:12:29 crc kubenswrapper[4948]: I1122 05:12:29.650985 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b6mf" event={"ID":"a8c730c8-bf35-42f2-a30c-cf603523fdfc","Type":"ContainerStarted","Data":"397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449"} Nov 22 05:12:29 crc kubenswrapper[4948]: I1122 05:12:29.676731 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9b6mf" podStartSLOduration=2.303351718 podStartE2EDuration="3.676707785s" podCreationTimestamp="2025-11-22 05:12:26 +0000 UTC" firstStartedPulling="2025-11-22 05:12:27.637447641 +0000 UTC m=+1550.323458167" lastFinishedPulling="2025-11-22 05:12:29.010803718 +0000 UTC m=+1551.696814234" observedRunningTime="2025-11-22 05:12:29.673093542 +0000 UTC 
m=+1552.359104058" watchObservedRunningTime="2025-11-22 05:12:29.676707785 +0000 UTC m=+1552.362718311" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.447286 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="manila-kuttl-tests/ceph" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.541119 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rvcj\" (UniqueName: \"kubernetes.io/projected/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-kube-api-access-6rvcj\") pod \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.541213 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-run\") pod \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.541295 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-log\") pod \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.541333 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"data\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-data\") pod \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\" (UID: \"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754\") " Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.542168 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-run" (OuterVolumeSpecName: "run") pod "ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" (UID: "ba3d7d7d-eba9-4f3c-8259-9ec7aab02754"). InnerVolumeSpecName "run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.542189 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-log" (OuterVolumeSpecName: "log") pod "ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" (UID: "ba3d7d7d-eba9-4f3c-8259-9ec7aab02754"). InnerVolumeSpecName "log". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.546897 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-kube-api-access-6rvcj" (OuterVolumeSpecName: "kube-api-access-6rvcj") pod "ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" (UID: "ba3d7d7d-eba9-4f3c-8259-9ec7aab02754"). InnerVolumeSpecName "kube-api-access-6rvcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.555530 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-data" (OuterVolumeSpecName: "data") pod "ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" (UID: "ba3d7d7d-eba9-4f3c-8259-9ec7aab02754"). InnerVolumeSpecName "data". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.643652 4948 reconciler_common.go:293] "Volume detached for volume \"log\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-log\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.643727 4948 reconciler_common.go:293] "Volume detached for volume \"data\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-data\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.643759 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rvcj\" (UniqueName: \"kubernetes.io/projected/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-kube-api-access-6rvcj\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.643783 4948 reconciler_common.go:293] "Volume detached for volume \"run\" (UniqueName: \"kubernetes.io/empty-dir/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754-run\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.681629 4948 generic.go:334] "Generic (PLEG): container finished" podID="ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" containerID="91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba" exitCode=137 Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.681675 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/ceph" event={"ID":"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754","Type":"ContainerDied","Data":"91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba"} Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.681703 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="manila-kuttl-tests/ceph" event={"ID":"ba3d7d7d-eba9-4f3c-8259-9ec7aab02754","Type":"ContainerDied","Data":"25d198bf2b240f97245bc0550db9b795c308661ca2d99acf6a767c0deee1348d"} Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.681722 4948 scope.go:117] "RemoveContainer" containerID="91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.681720 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="manila-kuttl-tests/ceph" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.711689 4948 scope.go:117] "RemoveContainer" containerID="91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba" Nov 22 05:12:33 crc kubenswrapper[4948]: E1122 05:12:33.713169 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba\": container with ID starting with 91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba not found: ID does not exist" containerID="91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.713251 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba"} err="failed to get container status \"91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba\": rpc error: code = NotFound desc = could not find container \"91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba\": container with ID starting with 91a6f22208a24674786c77261f32cbcf0492bd8b0bb8a64093ea167951c762ba not found: ID does not exist" Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.726454 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["manila-kuttl-tests/ceph"] Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.729832 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["manila-kuttl-tests/ceph"] Nov 22 05:12:33 crc kubenswrapper[4948]: I1122 05:12:33.783088 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" path="/var/lib/kubelet/pods/ba3d7d7d-eba9-4f3c-8259-9ec7aab02754/volumes" Nov 22 05:12:34 crc kubenswrapper[4948]: I1122 05:12:34.698643 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"] Nov 22 05:12:34 crc kubenswrapper[4948]: I1122 05:12:34.699174 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" podUID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerName="manager" containerID="cri-o://e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316" gracePeriod=10 Nov 22 05:12:34 crc kubenswrapper[4948]: I1122 05:12:34.699259 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" podUID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerName="kube-rbac-proxy" containerID="cri-o://0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b" gracePeriod=10 Nov 22 05:12:34 crc kubenswrapper[4948]: I1122 05:12:34.903865 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-index-hq6cx"] Nov 22 05:12:34 crc kubenswrapper[4948]: I1122 05:12:34.904066 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/keystone-operator-index-hq6cx" podUID="9af72c60-8ecb-4ffd-af6b-f17019153fb6" containerName="registry-server" containerID="cri-o://8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e" gracePeriod=30 Nov 22 05:12:34 crc kubenswrapper[4948]: I1122 05:12:34.943897 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"] Nov 22 05:12:34 crc kubenswrapper[4948]: I1122 05:12:34.952513 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/673e18c374f427d47ebf1299aef3a4ea1aab2302d0e987986af30e2621r4q4p"] Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.093874 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.203076 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-index-hq6cx" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.265777 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-apiservice-cert\") pod \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.265853 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-webhook-cert\") pod \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.265933 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwcwt\" (UniqueName: \"kubernetes.io/projected/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-kube-api-access-jwcwt\") pod \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\" (UID: \"30513ce9-a925-49b0-b8d8-e9a1eb92bc11\") " Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.272212 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "30513ce9-a925-49b0-b8d8-e9a1eb92bc11" (UID: "30513ce9-a925-49b0-b8d8-e9a1eb92bc11"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.272230 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "30513ce9-a925-49b0-b8d8-e9a1eb92bc11" (UID: "30513ce9-a925-49b0-b8d8-e9a1eb92bc11"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.272261 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-kube-api-access-jwcwt" (OuterVolumeSpecName: "kube-api-access-jwcwt") pod "30513ce9-a925-49b0-b8d8-e9a1eb92bc11" (UID: "30513ce9-a925-49b0-b8d8-e9a1eb92bc11"). InnerVolumeSpecName "kube-api-access-jwcwt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.367575 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4lcd\" (UniqueName: \"kubernetes.io/projected/9af72c60-8ecb-4ffd-af6b-f17019153fb6-kube-api-access-j4lcd\") pod \"9af72c60-8ecb-4ffd-af6b-f17019153fb6\" (UID: \"9af72c60-8ecb-4ffd-af6b-f17019153fb6\") " Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.367811 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwcwt\" (UniqueName: \"kubernetes.io/projected/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-kube-api-access-jwcwt\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.367824 4948 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.367833 4948 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/30513ce9-a925-49b0-b8d8-e9a1eb92bc11-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.370321 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9af72c60-8ecb-4ffd-af6b-f17019153fb6-kube-api-access-j4lcd" (OuterVolumeSpecName: "kube-api-access-j4lcd") pod "9af72c60-8ecb-4ffd-af6b-f17019153fb6" (UID: "9af72c60-8ecb-4ffd-af6b-f17019153fb6"). InnerVolumeSpecName "kube-api-access-j4lcd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.469022 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4lcd\" (UniqueName: \"kubernetes.io/projected/9af72c60-8ecb-4ffd-af6b-f17019153fb6-kube-api-access-j4lcd\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.698523 4948 generic.go:334] "Generic (PLEG): container finished" podID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerID="0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b" exitCode=0 Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.698558 4948 generic.go:334] "Generic (PLEG): container finished" podID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerID="e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316" exitCode=0 Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.698580 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.698618 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" event={"ID":"30513ce9-a925-49b0-b8d8-e9a1eb92bc11","Type":"ContainerDied","Data":"0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b"} Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.698649 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" event={"ID":"30513ce9-a925-49b0-b8d8-e9a1eb92bc11","Type":"ContainerDied","Data":"e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316"} Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.698662 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5" event={"ID":"30513ce9-a925-49b0-b8d8-e9a1eb92bc11","Type":"ContainerDied","Data":"075b31fa43676e064f91d733416feb0e17134e984fee60b24a8bcced8a93bb6e"} Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.698685 4948 scope.go:117] "RemoveContainer" containerID="0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.700363 4948 generic.go:334] "Generic (PLEG): container finished" podID="9af72c60-8ecb-4ffd-af6b-f17019153fb6" containerID="8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e" exitCode=0 Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.700385 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-hq6cx" event={"ID":"9af72c60-8ecb-4ffd-af6b-f17019153fb6","Type":"ContainerDied","Data":"8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e"} Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.700403 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-hq6cx" event={"ID":"9af72c60-8ecb-4ffd-af6b-f17019153fb6","Type":"ContainerDied","Data":"3bbaca832c2c7203655f2b1c5d96cf47829cdd50ccae91d87420dd090255ed8b"} Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.700452 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-hq6cx" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.716589 4948 scope.go:117] "RemoveContainer" containerID="e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.742957 4948 scope.go:117] "RemoveContainer" containerID="0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b" Nov 22 05:12:35 crc kubenswrapper[4948]: E1122 05:12:35.743909 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b\": container with ID starting with 0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b not found: ID does not exist" containerID="0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.743942 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b"} err="failed to get container status \"0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b\": rpc error: code = NotFound desc = could not find container \"0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b\": container with ID starting with 0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b not found: ID does not exist" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.743958 4948 scope.go:117] "RemoveContainer" containerID="e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316" Nov 22 05:12:35 crc kubenswrapper[4948]: E1122 05:12:35.744209 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316\": container with ID starting with e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316 not found: ID does not exist" containerID="e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.744285 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316"} err="failed to get container status \"e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316\": rpc error: code = NotFound desc = could not find container \"e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316\": container with ID starting with e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316 not found: ID does not exist" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.744348 4948 scope.go:117] "RemoveContainer" containerID="0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.744948 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b"} err="failed to get container status \"0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b\": rpc error: code = NotFound desc = could not find container \"0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b\": container with ID starting with 0d0705ba412a68fd77dccd09a2a80ed8469cb12991128fe63c95015a4e39647b not found: ID does not exist" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 
05:12:35.744976 4948 scope.go:117] "RemoveContainer" containerID="e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.746125 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316"} err="failed to get container status \"e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316\": rpc error: code = NotFound desc = could not find container \"e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316\": container with ID starting with e59b210a68dff2bbb197854907ba098d60d130234610fb65a663b0ee5a901316 not found: ID does not exist" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.746158 4948 scope.go:117] "RemoveContainer" containerID="8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.749705 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"] Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.766453 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51be5375-cea2-4a66-a424-d850caad872c" path="/var/lib/kubelet/pods/51be5375-cea2-4a66-a424-d850caad872c/volumes" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.767814 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5dfd7c9c5b-7b8x5"] Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.771755 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-index-hq6cx"] Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.777041 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/keystone-operator-index-hq6cx"] Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.783159 4948 scope.go:117] "RemoveContainer" containerID="8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e" Nov 22 05:12:35 crc kubenswrapper[4948]: E1122 05:12:35.783521 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e\": container with ID starting with 8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e not found: ID does not exist" containerID="8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e" Nov 22 05:12:35 crc kubenswrapper[4948]: I1122 05:12:35.783558 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e"} err="failed to get container status \"8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e\": rpc error: code = NotFound desc = could not find container \"8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e\": container with ID starting with 8bd0f8c8fe71a223a4a378551abb222138f3a18871ba671f398078d8fc1c751e not found: ID does not exist" Nov 22 05:12:36 crc kubenswrapper[4948]: I1122 05:12:36.792950 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:36 crc kubenswrapper[4948]: I1122 05:12:36.793418 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:36 crc 
kubenswrapper[4948]: I1122 05:12:36.858758 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:37 crc kubenswrapper[4948]: I1122 05:12:37.693507 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j"] Nov 22 05:12:37 crc kubenswrapper[4948]: I1122 05:12:37.693844 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" podUID="38b67bbd-f7f5-44f4-9383-ac7ba57e4554" containerName="operator" containerID="cri-o://5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10" gracePeriod=10 Nov 22 05:12:37 crc kubenswrapper[4948]: I1122 05:12:37.766555 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" path="/var/lib/kubelet/pods/30513ce9-a925-49b0-b8d8-e9a1eb92bc11/volumes" Nov 22 05:12:37 crc kubenswrapper[4948]: I1122 05:12:37.767192 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9af72c60-8ecb-4ffd-af6b-f17019153fb6" path="/var/lib/kubelet/pods/9af72c60-8ecb-4ffd-af6b-f17019153fb6/volumes" Nov 22 05:12:37 crc kubenswrapper[4948]: I1122 05:12:37.787256 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:37 crc kubenswrapper[4948]: I1122 05:12:37.970538 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-v94v7"] Nov 22 05:12:37 crc kubenswrapper[4948]: I1122 05:12:37.971957 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" podUID="13f8cc32-0ab5-4ad9-be73-8c7b7730983b" containerName="registry-server" containerID="cri-o://87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70" gracePeriod=30 Nov 22 05:12:37 crc kubenswrapper[4948]: I1122 05:12:37.981771 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558"] Nov 22 05:12:37 crc kubenswrapper[4948]: I1122 05:12:37.994664 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5909s558"] Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.075538 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.219172 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55lpv\" (UniqueName: \"kubernetes.io/projected/38b67bbd-f7f5-44f4-9383-ac7ba57e4554-kube-api-access-55lpv\") pod \"38b67bbd-f7f5-44f4-9383-ac7ba57e4554\" (UID: \"38b67bbd-f7f5-44f4-9383-ac7ba57e4554\") " Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.228897 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38b67bbd-f7f5-44f4-9383-ac7ba57e4554-kube-api-access-55lpv" (OuterVolumeSpecName: "kube-api-access-55lpv") pod "38b67bbd-f7f5-44f4-9383-ac7ba57e4554" (UID: "38b67bbd-f7f5-44f4-9383-ac7ba57e4554"). InnerVolumeSpecName "kube-api-access-55lpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.321215 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55lpv\" (UniqueName: \"kubernetes.io/projected/38b67bbd-f7f5-44f4-9383-ac7ba57e4554-kube-api-access-55lpv\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.373122 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.524081 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phknx\" (UniqueName: \"kubernetes.io/projected/13f8cc32-0ab5-4ad9-be73-8c7b7730983b-kube-api-access-phknx\") pod \"13f8cc32-0ab5-4ad9-be73-8c7b7730983b\" (UID: \"13f8cc32-0ab5-4ad9-be73-8c7b7730983b\") " Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.527823 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13f8cc32-0ab5-4ad9-be73-8c7b7730983b-kube-api-access-phknx" (OuterVolumeSpecName: "kube-api-access-phknx") pod "13f8cc32-0ab5-4ad9-be73-8c7b7730983b" (UID: "13f8cc32-0ab5-4ad9-be73-8c7b7730983b"). InnerVolumeSpecName "kube-api-access-phknx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.625669 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phknx\" (UniqueName: \"kubernetes.io/projected/13f8cc32-0ab5-4ad9-be73-8c7b7730983b-kube-api-access-phknx\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.739053 4948 generic.go:334] "Generic (PLEG): container finished" podID="38b67bbd-f7f5-44f4-9383-ac7ba57e4554" containerID="5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10" exitCode=0 Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.739148 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.739167 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" event={"ID":"38b67bbd-f7f5-44f4-9383-ac7ba57e4554","Type":"ContainerDied","Data":"5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10"} Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.739216 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j" event={"ID":"38b67bbd-f7f5-44f4-9383-ac7ba57e4554","Type":"ContainerDied","Data":"7b494bcba36df8c20a18c6b6ab11c0cfcb7f8633cd572f189d38e1da1c880bb5"} Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.739241 4948 scope.go:117] "RemoveContainer" containerID="5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.741970 4948 generic.go:334] "Generic (PLEG): container finished" podID="13f8cc32-0ab5-4ad9-be73-8c7b7730983b" containerID="87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70" exitCode=0 Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.743033 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.743108 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" event={"ID":"13f8cc32-0ab5-4ad9-be73-8c7b7730983b","Type":"ContainerDied","Data":"87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70"} Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.743165 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-v94v7" event={"ID":"13f8cc32-0ab5-4ad9-be73-8c7b7730983b","Type":"ContainerDied","Data":"e9e92002af933c52b554cd3c5a9744deaf6e9baef341e4e813513bcb4317a6a2"} Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.773128 4948 scope.go:117] "RemoveContainer" containerID="5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10" Nov 22 05:12:38 crc kubenswrapper[4948]: E1122 05:12:38.773675 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10\": container with ID starting with 5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10 not found: ID does not exist" containerID="5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.773771 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10"} err="failed to get container status \"5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10\": rpc error: code = NotFound desc = could not find container \"5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10\": container with ID starting with 5f658b59ca707fc4ceac7d7e67a8cf1263de622b9d919bd4a7061f7857529a10 not found: ID does not exist" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.773819 4948 scope.go:117] "RemoveContainer" containerID="87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.786129 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-v94v7"] Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.791068 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-v94v7"] Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.799531 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j"] Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.800807 4948 scope.go:117] "RemoveContainer" containerID="87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70" Nov 22 05:12:38 crc kubenswrapper[4948]: E1122 05:12:38.801246 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70\": container with ID starting with 87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70 not found: ID does not exist" containerID="87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.801298 4948 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70"} err="failed to get container status \"87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70\": rpc error: code = NotFound desc = could not find container \"87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70\": container with ID starting with 87c482562c796e9cc3d5d67af1c9fbda79beffc2d432349c671a3c66e74b7b70 not found: ID does not exist" Nov 22 05:12:38 crc kubenswrapper[4948]: I1122 05:12:38.803862 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-wcc9j"] Nov 22 05:12:39 crc kubenswrapper[4948]: I1122 05:12:39.773170 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13f8cc32-0ab5-4ad9-be73-8c7b7730983b" path="/var/lib/kubelet/pods/13f8cc32-0ab5-4ad9-be73-8c7b7730983b/volumes" Nov 22 05:12:39 crc kubenswrapper[4948]: I1122 05:12:39.774163 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38b67bbd-f7f5-44f4-9383-ac7ba57e4554" path="/var/lib/kubelet/pods/38b67bbd-f7f5-44f4-9383-ac7ba57e4554/volumes" Nov 22 05:12:39 crc kubenswrapper[4948]: I1122 05:12:39.775288 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82190887-0d93-4244-ae49-fcb691f23f0c" path="/var/lib/kubelet/pods/82190887-0d93-4244-ae49-fcb691f23f0c/volumes" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.253720 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9b6mf"] Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.254565 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9b6mf" podUID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerName="registry-server" containerID="cri-o://397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449" gracePeriod=2 Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.613212 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.753904 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-catalog-content\") pod \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.753985 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cr4vw\" (UniqueName: \"kubernetes.io/projected/a8c730c8-bf35-42f2-a30c-cf603523fdfc-kube-api-access-cr4vw\") pod \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.754005 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-utilities\") pod \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\" (UID: \"a8c730c8-bf35-42f2-a30c-cf603523fdfc\") " Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.755585 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-utilities" (OuterVolumeSpecName: "utilities") pod "a8c730c8-bf35-42f2-a30c-cf603523fdfc" (UID: "a8c730c8-bf35-42f2-a30c-cf603523fdfc"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.764706 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8c730c8-bf35-42f2-a30c-cf603523fdfc-kube-api-access-cr4vw" (OuterVolumeSpecName: "kube-api-access-cr4vw") pod "a8c730c8-bf35-42f2-a30c-cf603523fdfc" (UID: "a8c730c8-bf35-42f2-a30c-cf603523fdfc"). InnerVolumeSpecName "kube-api-access-cr4vw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.779387 4948 generic.go:334] "Generic (PLEG): container finished" podID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerID="397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449" exitCode=0 Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.779461 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b6mf" event={"ID":"a8c730c8-bf35-42f2-a30c-cf603523fdfc","Type":"ContainerDied","Data":"397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449"} Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.779511 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b6mf" event={"ID":"a8c730c8-bf35-42f2-a30c-cf603523fdfc","Type":"ContainerDied","Data":"d947bb47d7c2fde73506128c2b99e19c8d1ec73fbf610996a7b63fe5bc5a6bb6"} Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.779535 4948 scope.go:117] "RemoveContainer" containerID="397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.779589 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9b6mf" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.803671 4948 scope.go:117] "RemoveContainer" containerID="a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.816355 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a8c730c8-bf35-42f2-a30c-cf603523fdfc" (UID: "a8c730c8-bf35-42f2-a30c-cf603523fdfc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.821336 4948 scope.go:117] "RemoveContainer" containerID="c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.836520 4948 scope.go:117] "RemoveContainer" containerID="397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449" Nov 22 05:12:40 crc kubenswrapper[4948]: E1122 05:12:40.836999 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449\": container with ID starting with 397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449 not found: ID does not exist" containerID="397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.837058 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449"} err="failed to get container status \"397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449\": rpc error: code = NotFound desc = could not find container \"397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449\": container with ID starting with 397e0c3887b1e6a7227399b8cb917d2cd1358452b798dd9457e2039095ad1449 not found: ID does not exist" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.837087 4948 scope.go:117] "RemoveContainer" containerID="a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c" Nov 22 05:12:40 crc kubenswrapper[4948]: E1122 05:12:40.837580 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c\": container with ID starting with a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c not found: ID does not exist" containerID="a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.837648 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c"} err="failed to get container status \"a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c\": rpc error: code = NotFound desc = could not find container \"a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c\": container with ID starting with a5d9d53fef1a0b378648c785cb5322153a7ab6dbef363c0926b6598fc6b8857c not found: ID does not exist" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.837689 4948 scope.go:117] "RemoveContainer" containerID="c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64" Nov 22 05:12:40 crc kubenswrapper[4948]: E1122 05:12:40.838761 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64\": container with ID starting with c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64 not found: ID does not exist" containerID="c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.839554 4948 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64"} err="failed to get container status \"c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64\": rpc error: code = NotFound desc = could not find container \"c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64\": container with ID starting with c99e6dc13bcfacd176e418c371f21bf85b923e21f5af7989a0e375aa979fcb64 not found: ID does not exist" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.855282 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.855418 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cr4vw\" (UniqueName: \"kubernetes.io/projected/a8c730c8-bf35-42f2-a30c-cf603523fdfc-kube-api-access-cr4vw\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:40 crc kubenswrapper[4948]: I1122 05:12:40.855514 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c730c8-bf35-42f2-a30c-cf603523fdfc-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.117106 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9b6mf"] Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.121136 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9b6mf"] Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.652995 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92"] Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.653366 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" podUID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerName="manager" containerID="cri-o://5e9fc957d3e1d7e44ff5f79c185e1e76b71e4167883350cc7d86ddb3789c860a" gracePeriod=10 Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.653478 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" podUID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerName="kube-rbac-proxy" containerID="cri-o://facc8f6c0e0406f0753944f4d4dadff7e2665fdf9707e08eba9cc5251ac5c036" gracePeriod=10 Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.776442 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" path="/var/lib/kubelet/pods/a8c730c8-bf35-42f2-a30c-cf603523fdfc/volumes" Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.797766 4948 generic.go:334] "Generic (PLEG): container finished" podID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerID="facc8f6c0e0406f0753944f4d4dadff7e2665fdf9707e08eba9cc5251ac5c036" exitCode=0 Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.797793 4948 generic.go:334] "Generic (PLEG): container finished" podID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerID="5e9fc957d3e1d7e44ff5f79c185e1e76b71e4167883350cc7d86ddb3789c860a" exitCode=0 Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.797855 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" 
event={"ID":"04e5f997-9e92-49d0-9bd2-8635681683cb","Type":"ContainerDied","Data":"facc8f6c0e0406f0753944f4d4dadff7e2665fdf9707e08eba9cc5251ac5c036"} Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.797881 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" event={"ID":"04e5f997-9e92-49d0-9bd2-8635681683cb","Type":"ContainerDied","Data":"5e9fc957d3e1d7e44ff5f79c185e1e76b71e4167883350cc7d86ddb3789c860a"} Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.881298 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-hmxsc"] Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.881465 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/infra-operator-index-hmxsc" podUID="9a14e009-500d-4277-9fde-27db0ed6f943" containerName="registry-server" containerID="cri-o://bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3" gracePeriod=30 Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.910520 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn"] Nov 22 05:12:41 crc kubenswrapper[4948]: I1122 05:12:41.913165 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/8ad853bb090b3a36d4332bde14850afdc9872f13e5d95ff50094430eb6ww2vn"] Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.060354 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:12:42 crc kubenswrapper[4948]: E1122 05:12:42.128648 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3 is running failed: container process not found" containerID="bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 05:12:42 crc kubenswrapper[4948]: E1122 05:12:42.129171 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3 is running failed: container process not found" containerID="bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 05:12:42 crc kubenswrapper[4948]: E1122 05:12:42.131642 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3 is running failed: container process not found" containerID="bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 05:12:42 crc kubenswrapper[4948]: E1122 05:12:42.131715 4948 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3 is running failed: container process not found" probeType="Readiness" pod="openstack-operators/infra-operator-index-hmxsc" podUID="9a14e009-500d-4277-9fde-27db0ed6f943" containerName="registry-server" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.175802 4948 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-apiservice-cert\") pod \"04e5f997-9e92-49d0-9bd2-8635681683cb\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.175853 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-webhook-cert\") pod \"04e5f997-9e92-49d0-9bd2-8635681683cb\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.175884 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5q9v\" (UniqueName: \"kubernetes.io/projected/04e5f997-9e92-49d0-9bd2-8635681683cb-kube-api-access-r5q9v\") pod \"04e5f997-9e92-49d0-9bd2-8635681683cb\" (UID: \"04e5f997-9e92-49d0-9bd2-8635681683cb\") " Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.180643 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04e5f997-9e92-49d0-9bd2-8635681683cb-kube-api-access-r5q9v" (OuterVolumeSpecName: "kube-api-access-r5q9v") pod "04e5f997-9e92-49d0-9bd2-8635681683cb" (UID: "04e5f997-9e92-49d0-9bd2-8635681683cb"). InnerVolumeSpecName "kube-api-access-r5q9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.180677 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "04e5f997-9e92-49d0-9bd2-8635681683cb" (UID: "04e5f997-9e92-49d0-9bd2-8635681683cb"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.180733 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "04e5f997-9e92-49d0-9bd2-8635681683cb" (UID: "04e5f997-9e92-49d0-9bd2-8635681683cb"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.208957 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.277654 4948 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.277697 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5q9v\" (UniqueName: \"kubernetes.io/projected/04e5f997-9e92-49d0-9bd2-8635681683cb-kube-api-access-r5q9v\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.277743 4948 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/04e5f997-9e92-49d0-9bd2-8635681683cb-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.378175 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrqhc\" (UniqueName: \"kubernetes.io/projected/9a14e009-500d-4277-9fde-27db0ed6f943-kube-api-access-lrqhc\") pod \"9a14e009-500d-4277-9fde-27db0ed6f943\" (UID: \"9a14e009-500d-4277-9fde-27db0ed6f943\") " Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.381383 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a14e009-500d-4277-9fde-27db0ed6f943-kube-api-access-lrqhc" (OuterVolumeSpecName: "kube-api-access-lrqhc") pod "9a14e009-500d-4277-9fde-27db0ed6f943" (UID: "9a14e009-500d-4277-9fde-27db0ed6f943"). InnerVolumeSpecName "kube-api-access-lrqhc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.479498 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrqhc\" (UniqueName: \"kubernetes.io/projected/9a14e009-500d-4277-9fde-27db0ed6f943-kube-api-access-lrqhc\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.814619 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" event={"ID":"04e5f997-9e92-49d0-9bd2-8635681683cb","Type":"ContainerDied","Data":"a926a3ac61e5f8164e38a96b945e05b7c607238158110b3bf7fedf059b9294d4"} Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.815996 4948 scope.go:117] "RemoveContainer" containerID="facc8f6c0e0406f0753944f4d4dadff7e2665fdf9707e08eba9cc5251ac5c036" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.816067 4948 generic.go:334] "Generic (PLEG): container finished" podID="9a14e009-500d-4277-9fde-27db0ed6f943" containerID="bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3" exitCode=0 Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.814631 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.816116 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-hmxsc" event={"ID":"9a14e009-500d-4277-9fde-27db0ed6f943","Type":"ContainerDied","Data":"bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3"} Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.816164 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-hmxsc" event={"ID":"9a14e009-500d-4277-9fde-27db0ed6f943","Type":"ContainerDied","Data":"10ebe5a358082beee6b1b058e1a9940207d7c2c55923e98cfb2b0ee0c9b2aee6"} Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.817399 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-hmxsc" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.835937 4948 scope.go:117] "RemoveContainer" containerID="5e9fc957d3e1d7e44ff5f79c185e1e76b71e4167883350cc7d86ddb3789c860a" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.853592 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92"] Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.857554 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7c4897d696-rxn92"] Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.859195 4948 scope.go:117] "RemoveContainer" containerID="bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.868378 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-hmxsc"] Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.875193 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/infra-operator-index-hmxsc"] Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.882490 4948 scope.go:117] "RemoveContainer" containerID="bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3" Nov 22 05:12:42 crc kubenswrapper[4948]: E1122 05:12:42.882898 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3\": container with ID starting with bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3 not found: ID does not exist" containerID="bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.882937 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3"} err="failed to get container status \"bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3\": rpc error: code = NotFound desc = could not find container \"bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3\": container with ID starting with bbaf72d5fbcfc807b433aebadd2e10956dcb0cd928c96aeed6e90b56b95a16e3 not found: ID does not exist" Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.980005 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"] Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.980176 4948 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" containerName="kube-rbac-proxy" containerID="cri-o://c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c" gracePeriod=10 Nov 22 05:12:42 crc kubenswrapper[4948]: I1122 05:12:42.980272 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" containerName="manager" containerID="cri-o://bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb" gracePeriod=10 Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.269165 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-pntvg"] Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.269830 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-index-pntvg" podUID="71947045-a3b7-4dca-a2e6-c421d6328bc1" containerName="registry-server" containerID="cri-o://8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154" gracePeriod=30 Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.293227 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"] Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.301991 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/798531b9d9a078af26f5f153dd8093f0980ac32bb052a41050c010ef74ldvtf"] Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.379967 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.492906 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-apiservice-cert\") pod \"f4b54cb5-1843-4f85-abbb-37274329a537\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.492998 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-webhook-cert\") pod \"f4b54cb5-1843-4f85-abbb-37274329a537\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.493120 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7zqm\" (UniqueName: \"kubernetes.io/projected/f4b54cb5-1843-4f85-abbb-37274329a537-kube-api-access-t7zqm\") pod \"f4b54cb5-1843-4f85-abbb-37274329a537\" (UID: \"f4b54cb5-1843-4f85-abbb-37274329a537\") " Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.500417 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "f4b54cb5-1843-4f85-abbb-37274329a537" (UID: "f4b54cb5-1843-4f85-abbb-37274329a537"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.500497 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "f4b54cb5-1843-4f85-abbb-37274329a537" (UID: "f4b54cb5-1843-4f85-abbb-37274329a537"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.502649 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4b54cb5-1843-4f85-abbb-37274329a537-kube-api-access-t7zqm" (OuterVolumeSpecName: "kube-api-access-t7zqm") pod "f4b54cb5-1843-4f85-abbb-37274329a537" (UID: "f4b54cb5-1843-4f85-abbb-37274329a537"). InnerVolumeSpecName "kube-api-access-t7zqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.594932 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7zqm\" (UniqueName: \"kubernetes.io/projected/f4b54cb5-1843-4f85-abbb-37274329a537-kube-api-access-t7zqm\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.594998 4948 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.595012 4948 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f4b54cb5-1843-4f85-abbb-37274329a537-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.603253 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-pntvg" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.696044 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tn6gg\" (UniqueName: \"kubernetes.io/projected/71947045-a3b7-4dca-a2e6-c421d6328bc1-kube-api-access-tn6gg\") pod \"71947045-a3b7-4dca-a2e6-c421d6328bc1\" (UID: \"71947045-a3b7-4dca-a2e6-c421d6328bc1\") " Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.699853 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71947045-a3b7-4dca-a2e6-c421d6328bc1-kube-api-access-tn6gg" (OuterVolumeSpecName: "kube-api-access-tn6gg") pod "71947045-a3b7-4dca-a2e6-c421d6328bc1" (UID: "71947045-a3b7-4dca-a2e6-c421d6328bc1"). InnerVolumeSpecName "kube-api-access-tn6gg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.764502 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04e5f997-9e92-49d0-9bd2-8635681683cb" path="/var/lib/kubelet/pods/04e5f997-9e92-49d0-9bd2-8635681683cb/volumes" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.765047 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="222cae05-41c0-4d5c-bdd8-2d8f682cf4d2" path="/var/lib/kubelet/pods/222cae05-41c0-4d5c-bdd8-2d8f682cf4d2/volumes" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.765638 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6de108e0-d060-402e-8ad0-f52f89e5f155" path="/var/lib/kubelet/pods/6de108e0-d060-402e-8ad0-f52f89e5f155/volumes" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.766697 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a14e009-500d-4277-9fde-27db0ed6f943" path="/var/lib/kubelet/pods/9a14e009-500d-4277-9fde-27db0ed6f943/volumes" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.797728 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tn6gg\" (UniqueName: \"kubernetes.io/projected/71947045-a3b7-4dca-a2e6-c421d6328bc1-kube-api-access-tn6gg\") on node \"crc\" DevicePath \"\"" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.826656 4948 generic.go:334] "Generic (PLEG): container finished" podID="71947045-a3b7-4dca-a2e6-c421d6328bc1" containerID="8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154" exitCode=0 Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.826728 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-pntvg" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.826725 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-pntvg" event={"ID":"71947045-a3b7-4dca-a2e6-c421d6328bc1","Type":"ContainerDied","Data":"8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154"} Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.826783 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-pntvg" event={"ID":"71947045-a3b7-4dca-a2e6-c421d6328bc1","Type":"ContainerDied","Data":"887d0a0145266ef03fe8d7aa1961265f5a8444674f3de8731cd3f55da6e9c6d8"} Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.826805 4948 scope.go:117] "RemoveContainer" containerID="8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.831497 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b54cb5-1843-4f85-abbb-37274329a537" containerID="bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb" exitCode=0 Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.831529 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b54cb5-1843-4f85-abbb-37274329a537" containerID="c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c" exitCode=0 Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.831551 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" event={"ID":"f4b54cb5-1843-4f85-abbb-37274329a537","Type":"ContainerDied","Data":"bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb"} Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.831580 4948 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" event={"ID":"f4b54cb5-1843-4f85-abbb-37274329a537","Type":"ContainerDied","Data":"c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c"} Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.831578 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.831591 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq" event={"ID":"f4b54cb5-1843-4f85-abbb-37274329a537","Type":"ContainerDied","Data":"20049e7f46f68341995f40792be2fa9e65f2a7cd64e7dc2503d3d01f4231579e"} Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.854277 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"] Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.861268 4948 scope.go:117] "RemoveContainer" containerID="8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154" Nov 22 05:12:43 crc kubenswrapper[4948]: E1122 05:12:43.861808 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154\": container with ID starting with 8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154 not found: ID does not exist" containerID="8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.861844 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154"} err="failed to get container status \"8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154\": rpc error: code = NotFound desc = could not find container \"8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154\": container with ID starting with 8826f38f7b5fced7e2ebc4c57e469dec4d7f254c4940532999b92e3af4e37154 not found: ID does not exist" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.861870 4948 scope.go:117] "RemoveContainer" containerID="bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.868367 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-74445689fd-8dbqq"] Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.876670 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-pntvg"] Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.880913 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/mariadb-operator-index-pntvg"] Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.885119 4948 scope.go:117] "RemoveContainer" containerID="c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.902234 4948 scope.go:117] "RemoveContainer" containerID="bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb" Nov 22 05:12:43 crc kubenswrapper[4948]: E1122 05:12:43.902610 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb\": container with ID starting with bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb not found: ID does not exist" containerID="bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.902647 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb"} err="failed to get container status \"bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb\": rpc error: code = NotFound desc = could not find container \"bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb\": container with ID starting with bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb not found: ID does not exist" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.902667 4948 scope.go:117] "RemoveContainer" containerID="c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c" Nov 22 05:12:43 crc kubenswrapper[4948]: E1122 05:12:43.903025 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c\": container with ID starting with c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c not found: ID does not exist" containerID="c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.903046 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c"} err="failed to get container status \"c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c\": rpc error: code = NotFound desc = could not find container \"c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c\": container with ID starting with c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c not found: ID does not exist" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.903057 4948 scope.go:117] "RemoveContainer" containerID="bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.903290 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb"} err="failed to get container status \"bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb\": rpc error: code = NotFound desc = could not find container \"bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb\": container with ID starting with bdd726f5a7b221b1cb68d515cd52cb379bf4b162d9d585cac04344297c6657fb not found: ID does not exist" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.903309 4948 scope.go:117] "RemoveContainer" containerID="c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c" Nov 22 05:12:43 crc kubenswrapper[4948]: I1122 05:12:43.903675 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c"} err="failed to get container status \"c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c\": rpc error: code = NotFound desc = could not find container \"c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c\": container with ID starting with 
c52813d3fd39e5c2ce53f41aed4b3d0ac02ba34dd1b64e148a2e579fc90cbc5c not found: ID does not exist" Nov 22 05:12:45 crc kubenswrapper[4948]: I1122 05:12:45.773066 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71947045-a3b7-4dca-a2e6-c421d6328bc1" path="/var/lib/kubelet/pods/71947045-a3b7-4dca-a2e6-c421d6328bc1/volumes" Nov 22 05:12:45 crc kubenswrapper[4948]: I1122 05:12:45.774661 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" path="/var/lib/kubelet/pods/f4b54cb5-1843-4f85-abbb-37274329a537/volumes" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.661829 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4hf65"] Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662513 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerName="kube-rbac-proxy" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662535 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerName="kube-rbac-proxy" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662556 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38b67bbd-f7f5-44f4-9383-ac7ba57e4554" containerName="operator" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662568 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="38b67bbd-f7f5-44f4-9383-ac7ba57e4554" containerName="operator" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662581 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a14e009-500d-4277-9fde-27db0ed6f943" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662591 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a14e009-500d-4277-9fde-27db0ed6f943" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662607 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerName="manager" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662616 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerName="manager" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662630 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerName="kube-rbac-proxy" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662639 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerName="kube-rbac-proxy" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662660 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662671 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662687 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerName="extract-utilities" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662697 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerName="extract-utilities" Nov 22 05:12:46 crc 
kubenswrapper[4948]: E1122 05:12:46.662712 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerName="extract-content" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662722 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerName="extract-content" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662736 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13f8cc32-0ab5-4ad9-be73-8c7b7730983b" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662748 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f8cc32-0ab5-4ad9-be73-8c7b7730983b" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662759 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" containerName="manager" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662770 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" containerName="manager" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662784 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerName="manager" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662794 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerName="manager" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662810 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71947045-a3b7-4dca-a2e6-c421d6328bc1" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662819 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="71947045-a3b7-4dca-a2e6-c421d6328bc1" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662842 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" containerName="kube-rbac-proxy" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662852 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" containerName="kube-rbac-proxy" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662867 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af72c60-8ecb-4ffd-af6b-f17019153fb6" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662878 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af72c60-8ecb-4ffd-af6b-f17019153fb6" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: E1122 05:12:46.662889 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" containerName="ceph" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.662900 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" containerName="ceph" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663056 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="38b67bbd-f7f5-44f4-9383-ac7ba57e4554" containerName="operator" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663072 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba3d7d7d-eba9-4f3c-8259-9ec7aab02754" containerName="ceph" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663088 
4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerName="kube-rbac-proxy" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663107 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerName="kube-rbac-proxy" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663124 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c730c8-bf35-42f2-a30c-cf603523fdfc" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663138 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9af72c60-8ecb-4ffd-af6b-f17019153fb6" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663152 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="04e5f997-9e92-49d0-9bd2-8635681683cb" containerName="manager" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663165 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="71947045-a3b7-4dca-a2e6-c421d6328bc1" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663178 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" containerName="manager" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663192 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b54cb5-1843-4f85-abbb-37274329a537" containerName="kube-rbac-proxy" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663206 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="13f8cc32-0ab5-4ad9-be73-8c7b7730983b" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663221 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a14e009-500d-4277-9fde-27db0ed6f943" containerName="registry-server" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.663231 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="30513ce9-a925-49b0-b8d8-e9a1eb92bc11" containerName="manager" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.664382 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.692497 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4hf65"] Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.842730 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-utilities\") pod \"redhat-operators-4hf65\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.842781 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-catalog-content\") pod \"redhat-operators-4hf65\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.842856 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28zpf\" (UniqueName: \"kubernetes.io/projected/4b243286-ae34-4a43-91e7-e7d2ca0507bf-kube-api-access-28zpf\") pod \"redhat-operators-4hf65\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.943819 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28zpf\" (UniqueName: \"kubernetes.io/projected/4b243286-ae34-4a43-91e7-e7d2ca0507bf-kube-api-access-28zpf\") pod \"redhat-operators-4hf65\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.943932 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-utilities\") pod \"redhat-operators-4hf65\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.943984 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-catalog-content\") pod \"redhat-operators-4hf65\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.944446 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-catalog-content\") pod \"redhat-operators-4hf65\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.944636 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-utilities\") pod \"redhat-operators-4hf65\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:46 crc kubenswrapper[4948]: I1122 05:12:46.969639 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-28zpf\" (UniqueName: \"kubernetes.io/projected/4b243286-ae34-4a43-91e7-e7d2ca0507bf-kube-api-access-28zpf\") pod \"redhat-operators-4hf65\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:47 crc kubenswrapper[4948]: I1122 05:12:47.007821 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:47 crc kubenswrapper[4948]: I1122 05:12:47.443798 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4hf65"] Nov 22 05:12:47 crc kubenswrapper[4948]: I1122 05:12:47.868979 4948 generic.go:334] "Generic (PLEG): container finished" podID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerID="66345be45d3d94b61c093ddf116864f19755f885d4eebb0bc7a59db71436bf98" exitCode=0 Nov 22 05:12:47 crc kubenswrapper[4948]: I1122 05:12:47.869083 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4hf65" event={"ID":"4b243286-ae34-4a43-91e7-e7d2ca0507bf","Type":"ContainerDied","Data":"66345be45d3d94b61c093ddf116864f19755f885d4eebb0bc7a59db71436bf98"} Nov 22 05:12:47 crc kubenswrapper[4948]: I1122 05:12:47.869309 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4hf65" event={"ID":"4b243286-ae34-4a43-91e7-e7d2ca0507bf","Type":"ContainerStarted","Data":"f0525985f9efe9cd99e0c0e2649b0d101f1a769f566683fbb4e762c315d823a8"} Nov 22 05:12:49 crc kubenswrapper[4948]: I1122 05:12:49.889949 4948 generic.go:334] "Generic (PLEG): container finished" podID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerID="327ae8479d7fbcfeaaa68d82e7b0fc6700417663960a908ad840b367cfbb1c83" exitCode=0 Nov 22 05:12:49 crc kubenswrapper[4948]: I1122 05:12:49.890021 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4hf65" event={"ID":"4b243286-ae34-4a43-91e7-e7d2ca0507bf","Type":"ContainerDied","Data":"327ae8479d7fbcfeaaa68d82e7b0fc6700417663960a908ad840b367cfbb1c83"} Nov 22 05:12:51 crc kubenswrapper[4948]: I1122 05:12:51.910677 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4hf65" event={"ID":"4b243286-ae34-4a43-91e7-e7d2ca0507bf","Type":"ContainerStarted","Data":"0e9ddf7a8b238fea8cf0e1327b7494658c8be771b5645a07b921efbfb16b2f47"} Nov 22 05:12:51 crc kubenswrapper[4948]: I1122 05:12:51.932839 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4hf65" podStartSLOduration=3.113810898 podStartE2EDuration="5.932815355s" podCreationTimestamp="2025-11-22 05:12:46 +0000 UTC" firstStartedPulling="2025-11-22 05:12:47.871305193 +0000 UTC m=+1570.557315709" lastFinishedPulling="2025-11-22 05:12:50.69030961 +0000 UTC m=+1573.376320166" observedRunningTime="2025-11-22 05:12:51.932397763 +0000 UTC m=+1574.618408299" watchObservedRunningTime="2025-11-22 05:12:51.932815355 +0000 UTC m=+1574.618825911" Nov 22 05:12:56 crc kubenswrapper[4948]: I1122 05:12:56.845349 4948 scope.go:117] "RemoveContainer" containerID="de7c2ad9bf7f134a72420b8ddf1d510937659832a4e061d216875174d2864188" Nov 22 05:12:56 crc kubenswrapper[4948]: I1122 05:12:56.866843 4948 scope.go:117] "RemoveContainer" containerID="dc3a0c45e74648a639cdd91acdb2c2134f423b4c9dc127925ba3a516fb454544" Nov 22 05:12:56 crc kubenswrapper[4948]: I1122 05:12:56.916002 4948 scope.go:117] "RemoveContainer" 
containerID="5fc40c3eac6d4a3cb84579893c2ee0eb5849d8f3c9c087c53b2b8487aad10ec2" Nov 22 05:12:56 crc kubenswrapper[4948]: I1122 05:12:56.937329 4948 scope.go:117] "RemoveContainer" containerID="d6275a7e4a6def06541aaa05ec7304000dc52dbfb86333c23ebe7adddfe0d3ac" Nov 22 05:12:56 crc kubenswrapper[4948]: I1122 05:12:56.962296 4948 scope.go:117] "RemoveContainer" containerID="f55ff0d0e7d49582d342dd8159a7f4cd59f36a614451579f2daa83a99d775c2c" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.007231 4948 scope.go:117] "RemoveContainer" containerID="81b80e5ff0f4b9842b2f38e86477d8a89de33250f984699eac217c7493755414" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.007935 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.008194 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.025303 4948 scope.go:117] "RemoveContainer" containerID="cb60cefb26d31097f53e3c849669eaf0a1d9a87d57effa58e3bc8c66e86f83b4" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.039923 4948 scope.go:117] "RemoveContainer" containerID="69f492b59970a6586ed2c0f4211951c4c376c8b7a322a4a1371d08fc3f9bc6c7" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.052868 4948 scope.go:117] "RemoveContainer" containerID="21d58ebe739638e60ac63c8c62e34bc9b1be3ab2de4fdf0ede14b6342a302623" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.067103 4948 scope.go:117] "RemoveContainer" containerID="7bc741ac3b487b112e01588019322a1d6ec70fa313fd764d368a20779256e958" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.082386 4948 scope.go:117] "RemoveContainer" containerID="fd80e05a440781d462ad691a501ecc23bfcc03e89eba413f2be4af1f0b8ad470" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.098543 4948 scope.go:117] "RemoveContainer" containerID="d84d48cb390546092b493267142c741b016bdebd8d244445b8106b8aeb971361" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.118226 4948 scope.go:117] "RemoveContainer" containerID="0e9091a8d798c540d74964a3bf68639ff34b916a2aff5bd2e7d453f3c380b82b" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.140183 4948 scope.go:117] "RemoveContainer" containerID="6098e827df5b4893e8a405ba14dc8cad649c37495a422494d31429fad77df783" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.161527 4948 scope.go:117] "RemoveContainer" containerID="8b0bdf16d7effd19a699ab370e963e192a8e042fa9c96b0668e63d8808984c90" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.178780 4948 scope.go:117] "RemoveContainer" containerID="1fa977130fa41ff8fc0923b1c393c05385e1c144fd647e13bcbb3cc67369fa39" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.198004 4948 scope.go:117] "RemoveContainer" containerID="b24bcd9db4b450e8141e73cb00a9c2ea5b3440f71607cf386fcb14af33430722" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.212346 4948 scope.go:117] "RemoveContainer" containerID="b0ec0f8e16ca129f740a2cbc1055abaf33e10320a61b4b39de9c90b75b0bbd36" Nov 22 05:12:57 crc kubenswrapper[4948]: I1122 05:12:57.229905 4948 scope.go:117] "RemoveContainer" containerID="0e13ae764c94b80cc837dbca0c434f0e97ced9137585a03fc359daa35cd766fa" Nov 22 05:12:58 crc kubenswrapper[4948]: I1122 05:12:58.055990 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4hf65" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" 
containerName="registry-server" probeResult="failure" output=< Nov 22 05:12:58 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Nov 22 05:12:58 crc kubenswrapper[4948]: > Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.523621 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-cqz2s/must-gather-6dkj7"] Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.525177 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.529801 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-cqz2s"/"openshift-service-ca.crt" Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.530115 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-cqz2s"/"kube-root-ca.crt" Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.548867 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-cqz2s/must-gather-6dkj7"] Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.720079 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmp42\" (UniqueName: \"kubernetes.io/projected/0298f45b-e114-423e-883c-fcae82858f10-kube-api-access-vmp42\") pod \"must-gather-6dkj7\" (UID: \"0298f45b-e114-423e-883c-fcae82858f10\") " pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.720155 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0298f45b-e114-423e-883c-fcae82858f10-must-gather-output\") pod \"must-gather-6dkj7\" (UID: \"0298f45b-e114-423e-883c-fcae82858f10\") " pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.822052 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0298f45b-e114-423e-883c-fcae82858f10-must-gather-output\") pod \"must-gather-6dkj7\" (UID: \"0298f45b-e114-423e-883c-fcae82858f10\") " pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.822141 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmp42\" (UniqueName: \"kubernetes.io/projected/0298f45b-e114-423e-883c-fcae82858f10-kube-api-access-vmp42\") pod \"must-gather-6dkj7\" (UID: \"0298f45b-e114-423e-883c-fcae82858f10\") " pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.822926 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0298f45b-e114-423e-883c-fcae82858f10-must-gather-output\") pod \"must-gather-6dkj7\" (UID: \"0298f45b-e114-423e-883c-fcae82858f10\") " pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 05:13:06.841884 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmp42\" (UniqueName: \"kubernetes.io/projected/0298f45b-e114-423e-883c-fcae82858f10-kube-api-access-vmp42\") pod \"must-gather-6dkj7\" (UID: \"0298f45b-e114-423e-883c-fcae82858f10\") " pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:13:06 crc kubenswrapper[4948]: I1122 
05:13:06.846232 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:13:07 crc kubenswrapper[4948]: I1122 05:13:07.046598 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:13:07 crc kubenswrapper[4948]: I1122 05:13:07.092638 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:13:07 crc kubenswrapper[4948]: I1122 05:13:07.242021 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-cqz2s/must-gather-6dkj7"] Nov 22 05:13:07 crc kubenswrapper[4948]: I1122 05:13:07.274865 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4hf65"] Nov 22 05:13:08 crc kubenswrapper[4948]: I1122 05:13:08.033246 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" event={"ID":"0298f45b-e114-423e-883c-fcae82858f10","Type":"ContainerStarted","Data":"e8ad7d04ece01c49113dc6aad45cba040aa973585a418f8721e96130ebd2988a"} Nov 22 05:13:09 crc kubenswrapper[4948]: I1122 05:13:09.040667 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4hf65" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerName="registry-server" containerID="cri-o://0e9ddf7a8b238fea8cf0e1327b7494658c8be771b5645a07b921efbfb16b2f47" gracePeriod=2 Nov 22 05:13:10 crc kubenswrapper[4948]: I1122 05:13:10.048892 4948 generic.go:334] "Generic (PLEG): container finished" podID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerID="0e9ddf7a8b238fea8cf0e1327b7494658c8be771b5645a07b921efbfb16b2f47" exitCode=0 Nov 22 05:13:10 crc kubenswrapper[4948]: I1122 05:13:10.048964 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4hf65" event={"ID":"4b243286-ae34-4a43-91e7-e7d2ca0507bf","Type":"ContainerDied","Data":"0e9ddf7a8b238fea8cf0e1327b7494658c8be771b5645a07b921efbfb16b2f47"} Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.281387 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.321184 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28zpf\" (UniqueName: \"kubernetes.io/projected/4b243286-ae34-4a43-91e7-e7d2ca0507bf-kube-api-access-28zpf\") pod \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.321266 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-utilities\") pod \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.321322 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-catalog-content\") pod \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\" (UID: \"4b243286-ae34-4a43-91e7-e7d2ca0507bf\") " Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.323167 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-utilities" (OuterVolumeSpecName: "utilities") pod "4b243286-ae34-4a43-91e7-e7d2ca0507bf" (UID: "4b243286-ae34-4a43-91e7-e7d2ca0507bf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.328805 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b243286-ae34-4a43-91e7-e7d2ca0507bf-kube-api-access-28zpf" (OuterVolumeSpecName: "kube-api-access-28zpf") pod "4b243286-ae34-4a43-91e7-e7d2ca0507bf" (UID: "4b243286-ae34-4a43-91e7-e7d2ca0507bf"). InnerVolumeSpecName "kube-api-access-28zpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.422190 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b243286-ae34-4a43-91e7-e7d2ca0507bf" (UID: "4b243286-ae34-4a43-91e7-e7d2ca0507bf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.423093 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.423118 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b243286-ae34-4a43-91e7-e7d2ca0507bf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:13:13 crc kubenswrapper[4948]: I1122 05:13:13.423131 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28zpf\" (UniqueName: \"kubernetes.io/projected/4b243286-ae34-4a43-91e7-e7d2ca0507bf-kube-api-access-28zpf\") on node \"crc\" DevicePath \"\"" Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.092584 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4hf65" event={"ID":"4b243286-ae34-4a43-91e7-e7d2ca0507bf","Type":"ContainerDied","Data":"f0525985f9efe9cd99e0c0e2649b0d101f1a769f566683fbb4e762c315d823a8"} Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.092960 4948 scope.go:117] "RemoveContainer" containerID="0e9ddf7a8b238fea8cf0e1327b7494658c8be771b5645a07b921efbfb16b2f47" Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.092665 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4hf65" Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.095122 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" event={"ID":"0298f45b-e114-423e-883c-fcae82858f10","Type":"ContainerStarted","Data":"067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25"} Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.095166 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" event={"ID":"0298f45b-e114-423e-883c-fcae82858f10","Type":"ContainerStarted","Data":"a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089"} Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.111914 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4hf65"] Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.116823 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4hf65"] Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.126208 4948 scope.go:117] "RemoveContainer" containerID="327ae8479d7fbcfeaaa68d82e7b0fc6700417663960a908ad840b367cfbb1c83" Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.127899 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" podStartSLOduration=2.052948056 podStartE2EDuration="8.127864509s" podCreationTimestamp="2025-11-22 05:13:06 +0000 UTC" firstStartedPulling="2025-11-22 05:13:07.255726945 +0000 UTC m=+1589.941737461" lastFinishedPulling="2025-11-22 05:13:13.330643398 +0000 UTC m=+1596.016653914" observedRunningTime="2025-11-22 05:13:14.127431706 +0000 UTC m=+1596.813442232" watchObservedRunningTime="2025-11-22 05:13:14.127864509 +0000 UTC m=+1596.813875075" Nov 22 05:13:14 crc kubenswrapper[4948]: I1122 05:13:14.155233 4948 scope.go:117] "RemoveContainer" containerID="66345be45d3d94b61c093ddf116864f19755f885d4eebb0bc7a59db71436bf98" Nov 22 05:13:15 crc 
kubenswrapper[4948]: I1122 05:13:15.764731 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" path="/var/lib/kubelet/pods/4b243286-ae34-4a43-91e7-e7d2ca0507bf/volumes" Nov 22 05:13:57 crc kubenswrapper[4948]: I1122 05:13:57.585738 4948 scope.go:117] "RemoveContainer" containerID="c4bf0bfc3163e1d9781352303379fa1016309d6b188e6a786c7a5ed1e2f3ad0a" Nov 22 05:13:57 crc kubenswrapper[4948]: I1122 05:13:57.608191 4948 scope.go:117] "RemoveContainer" containerID="784d51b53f825d2356da5c935e856607f95f41d10d8310910cf6b8c6919c7b7b" Nov 22 05:13:57 crc kubenswrapper[4948]: I1122 05:13:57.656979 4948 scope.go:117] "RemoveContainer" containerID="55fd401c1439972a8275317a593c72c8504a4f4f26af687c001dc7360eb88831" Nov 22 05:13:57 crc kubenswrapper[4948]: I1122 05:13:57.836986 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-h4z45_09733b67-9323-460c-ab8e-e55fbaf31542/control-plane-machine-set-operator/0.log" Nov 22 05:13:58 crc kubenswrapper[4948]: I1122 05:13:58.002425 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-mgqq5_b5f1f7f2-3a3e-464d-84f7-69e726b785a7/machine-api-operator/0.log" Nov 22 05:13:58 crc kubenswrapper[4948]: I1122 05:13:58.008078 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-mgqq5_b5f1f7f2-3a3e-464d-84f7-69e726b785a7/kube-rbac-proxy/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.087848 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-gdrxf_049ab730-c768-4140-98aa-ac16df011ab1/kube-rbac-proxy/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.119942 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-gdrxf_049ab730-c768-4140-98aa-ac16df011ab1/controller/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.221266 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-frr-files/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.367007 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-frr-files/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.382330 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-metrics/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.389357 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-reloader/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.403030 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-reloader/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.578195 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-metrics/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.583171 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-frr-files/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.588265 4948 
log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-reloader/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.614947 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-metrics/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.749897 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-frr-files/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.762614 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-metrics/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.785639 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-reloader/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.788510 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/controller/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.919203 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/frr-metrics/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.937374 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/kube-rbac-proxy/0.log" Nov 22 05:14:13 crc kubenswrapper[4948]: I1122 05:14:13.979164 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/kube-rbac-proxy-frr/0.log" Nov 22 05:14:14 crc kubenswrapper[4948]: I1122 05:14:14.087397 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/reloader/0.log" Nov 22 05:14:14 crc kubenswrapper[4948]: I1122 05:14:14.134745 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-d8hlb_0d29ddea-72d4-4194-8050-8e302d8000a1/frr-k8s-webhook-server/0.log" Nov 22 05:14:14 crc kubenswrapper[4948]: I1122 05:14:14.309606 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-c4c4f5766-cqwrs_cc6bfaa2-11b1-48d0-92c5-e025633693b8/manager/0.log" Nov 22 05:14:14 crc kubenswrapper[4948]: I1122 05:14:14.316331 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/frr/0.log" Nov 22 05:14:14 crc kubenswrapper[4948]: I1122 05:14:14.435515 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-789cf9d5c8-mhm7r_ff397fa2-bc96-49ee-a508-2c0da701972a/webhook-server/0.log" Nov 22 05:14:14 crc kubenswrapper[4948]: I1122 05:14:14.461697 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-m2xpv_67c508f6-24a6-4683-96e1-e324b8a6f5b8/kube-rbac-proxy/0.log" Nov 22 05:14:14 crc kubenswrapper[4948]: I1122 05:14:14.567447 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-m2xpv_67c508f6-24a6-4683-96e1-e324b8a6f5b8/speaker/0.log" Nov 22 05:14:29 crc kubenswrapper[4948]: I1122 05:14:29.949717 4948 patch_prober.go:28] interesting 
pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:14:29 crc kubenswrapper[4948]: I1122 05:14:29.950342 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:14:40 crc kubenswrapper[4948]: I1122 05:14:40.651525 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-utilities/0.log" Nov 22 05:14:40 crc kubenswrapper[4948]: I1122 05:14:40.880015 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-content/0.log" Nov 22 05:14:40 crc kubenswrapper[4948]: I1122 05:14:40.888397 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-utilities/0.log" Nov 22 05:14:40 crc kubenswrapper[4948]: I1122 05:14:40.894005 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-content/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.034950 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-content/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.055136 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-utilities/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.235479 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-utilities/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.296294 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/registry-server/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.392893 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-content/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.399881 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-utilities/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.410614 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-content/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.555816 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-utilities/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.559228 4948 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-content/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.774325 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/util/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.918404 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/pull/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.938163 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/util/0.log" Nov 22 05:14:41 crc kubenswrapper[4948]: I1122 05:14:41.962006 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/pull/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.041351 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/registry-server/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.139355 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/extract/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.144547 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/util/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.174831 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/pull/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.313933 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-n57dg_9af9a2c0-1d8b-47e3-bc8a-5573b25fb786/marketplace-operator/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.359083 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-utilities/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.503896 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-content/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.548778 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-content/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.555287 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-utilities/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.693715 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-content/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.720370 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-utilities/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.789742 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/registry-server/0.log" Nov 22 05:14:42 crc kubenswrapper[4948]: I1122 05:14:42.886298 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-utilities/0.log" Nov 22 05:14:43 crc kubenswrapper[4948]: I1122 05:14:43.073210 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-content/0.log" Nov 22 05:14:43 crc kubenswrapper[4948]: I1122 05:14:43.080829 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-utilities/0.log" Nov 22 05:14:43 crc kubenswrapper[4948]: I1122 05:14:43.088802 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-content/0.log" Nov 22 05:14:43 crc kubenswrapper[4948]: I1122 05:14:43.202922 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-utilities/0.log" Nov 22 05:14:43 crc kubenswrapper[4948]: I1122 05:14:43.269299 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-content/0.log" Nov 22 05:14:43 crc kubenswrapper[4948]: I1122 05:14:43.569433 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/registry-server/0.log" Nov 22 05:14:59 crc kubenswrapper[4948]: I1122 05:14:59.789986 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:14:59 crc kubenswrapper[4948]: I1122 05:14:59.792196 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.162902 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr"] Nov 22 05:15:00 crc kubenswrapper[4948]: E1122 05:15:00.163171 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerName="extract-utilities" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.163185 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" 
containerName="extract-utilities" Nov 22 05:15:00 crc kubenswrapper[4948]: E1122 05:15:00.163199 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerName="extract-content" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.163208 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerName="extract-content" Nov 22 05:15:00 crc kubenswrapper[4948]: E1122 05:15:00.163216 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerName="registry-server" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.163224 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerName="registry-server" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.163361 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b243286-ae34-4a43-91e7-e7d2ca0507bf" containerName="registry-server" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.164240 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.168605 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.168841 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.177495 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr"] Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.320680 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47sf2\" (UniqueName: \"kubernetes.io/projected/13b5f842-07a1-4bfb-bf36-78865d8be0ec-kube-api-access-47sf2\") pod \"collect-profiles-29396475-w9dpr\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.320796 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13b5f842-07a1-4bfb-bf36-78865d8be0ec-config-volume\") pod \"collect-profiles-29396475-w9dpr\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.320911 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13b5f842-07a1-4bfb-bf36-78865d8be0ec-secret-volume\") pod \"collect-profiles-29396475-w9dpr\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.422182 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13b5f842-07a1-4bfb-bf36-78865d8be0ec-config-volume\") pod \"collect-profiles-29396475-w9dpr\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.422274 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13b5f842-07a1-4bfb-bf36-78865d8be0ec-secret-volume\") pod \"collect-profiles-29396475-w9dpr\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.422303 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47sf2\" (UniqueName: \"kubernetes.io/projected/13b5f842-07a1-4bfb-bf36-78865d8be0ec-kube-api-access-47sf2\") pod \"collect-profiles-29396475-w9dpr\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.423090 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13b5f842-07a1-4bfb-bf36-78865d8be0ec-config-volume\") pod \"collect-profiles-29396475-w9dpr\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.432133 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13b5f842-07a1-4bfb-bf36-78865d8be0ec-secret-volume\") pod \"collect-profiles-29396475-w9dpr\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.440427 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47sf2\" (UniqueName: \"kubernetes.io/projected/13b5f842-07a1-4bfb-bf36-78865d8be0ec-kube-api-access-47sf2\") pod \"collect-profiles-29396475-w9dpr\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.491737 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:00 crc kubenswrapper[4948]: I1122 05:15:00.909430 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr"] Nov 22 05:15:00 crc kubenswrapper[4948]: W1122 05:15:00.925217 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13b5f842_07a1_4bfb_bf36_78865d8be0ec.slice/crio-36421d88a92b749c93d9cdf469b1e63bbcfdd65c4db1a659e2324d1eef005e76 WatchSource:0}: Error finding container 36421d88a92b749c93d9cdf469b1e63bbcfdd65c4db1a659e2324d1eef005e76: Status 404 returned error can't find the container with id 36421d88a92b749c93d9cdf469b1e63bbcfdd65c4db1a659e2324d1eef005e76 Nov 22 05:15:01 crc kubenswrapper[4948]: I1122 05:15:01.809033 4948 generic.go:334] "Generic (PLEG): container finished" podID="13b5f842-07a1-4bfb-bf36-78865d8be0ec" containerID="3871557a079385c6b7f573dd97a9de5ee14d0260e45f2dd27a918bae15f4fe1a" exitCode=0 Nov 22 05:15:01 crc kubenswrapper[4948]: I1122 05:15:01.809103 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" event={"ID":"13b5f842-07a1-4bfb-bf36-78865d8be0ec","Type":"ContainerDied","Data":"3871557a079385c6b7f573dd97a9de5ee14d0260e45f2dd27a918bae15f4fe1a"} Nov 22 05:15:01 crc kubenswrapper[4948]: I1122 05:15:01.809160 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" event={"ID":"13b5f842-07a1-4bfb-bf36-78865d8be0ec","Type":"ContainerStarted","Data":"36421d88a92b749c93d9cdf469b1e63bbcfdd65c4db1a659e2324d1eef005e76"} Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.123679 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.159712 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47sf2\" (UniqueName: \"kubernetes.io/projected/13b5f842-07a1-4bfb-bf36-78865d8be0ec-kube-api-access-47sf2\") pod \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.159783 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13b5f842-07a1-4bfb-bf36-78865d8be0ec-config-volume\") pod \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.160984 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13b5f842-07a1-4bfb-bf36-78865d8be0ec-config-volume" (OuterVolumeSpecName: "config-volume") pod "13b5f842-07a1-4bfb-bf36-78865d8be0ec" (UID: "13b5f842-07a1-4bfb-bf36-78865d8be0ec"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.168002 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13b5f842-07a1-4bfb-bf36-78865d8be0ec-kube-api-access-47sf2" (OuterVolumeSpecName: "kube-api-access-47sf2") pod "13b5f842-07a1-4bfb-bf36-78865d8be0ec" (UID: "13b5f842-07a1-4bfb-bf36-78865d8be0ec"). 
InnerVolumeSpecName "kube-api-access-47sf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.260855 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13b5f842-07a1-4bfb-bf36-78865d8be0ec-secret-volume\") pod \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\" (UID: \"13b5f842-07a1-4bfb-bf36-78865d8be0ec\") " Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.261220 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47sf2\" (UniqueName: \"kubernetes.io/projected/13b5f842-07a1-4bfb-bf36-78865d8be0ec-kube-api-access-47sf2\") on node \"crc\" DevicePath \"\"" Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.261237 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13b5f842-07a1-4bfb-bf36-78865d8be0ec-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.266519 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b5f842-07a1-4bfb-bf36-78865d8be0ec-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "13b5f842-07a1-4bfb-bf36-78865d8be0ec" (UID: "13b5f842-07a1-4bfb-bf36-78865d8be0ec"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.362548 4948 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13b5f842-07a1-4bfb-bf36-78865d8be0ec-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.836039 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" event={"ID":"13b5f842-07a1-4bfb-bf36-78865d8be0ec","Type":"ContainerDied","Data":"36421d88a92b749c93d9cdf469b1e63bbcfdd65c4db1a659e2324d1eef005e76"} Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.836962 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36421d88a92b749c93d9cdf469b1e63bbcfdd65c4db1a659e2324d1eef005e76" Nov 22 05:15:03 crc kubenswrapper[4948]: I1122 05:15:03.837232 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396475-w9dpr" Nov 22 05:15:29 crc kubenswrapper[4948]: I1122 05:15:29.789738 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:15:29 crc kubenswrapper[4948]: I1122 05:15:29.790446 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:15:29 crc kubenswrapper[4948]: I1122 05:15:29.790527 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 05:15:29 crc kubenswrapper[4948]: I1122 05:15:29.791767 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754"} pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 05:15:29 crc kubenswrapper[4948]: I1122 05:15:29.791856 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" containerID="cri-o://9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" gracePeriod=600 Nov 22 05:15:29 crc kubenswrapper[4948]: E1122 05:15:29.927509 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:15:30 crc kubenswrapper[4948]: I1122 05:15:30.032352 4948 generic.go:334] "Generic (PLEG): container finished" podID="126f010b-a640-4133-b63f-d2976da99215" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" exitCode=0 Nov 22 05:15:30 crc kubenswrapper[4948]: I1122 05:15:30.032424 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerDied","Data":"9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754"} Nov 22 05:15:30 crc kubenswrapper[4948]: I1122 05:15:30.032522 4948 scope.go:117] "RemoveContainer" containerID="23c94a6fcacee9c3faebd2019427dd27c2a4acb1f102d2cd48e6ac38c0f38971" Nov 22 05:15:30 crc kubenswrapper[4948]: I1122 05:15:30.034309 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:15:30 crc kubenswrapper[4948]: E1122 05:15:30.034768 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:15:40 crc kubenswrapper[4948]: I1122 05:15:40.758445 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:15:40 crc kubenswrapper[4948]: E1122 05:15:40.759891 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:15:46 crc kubenswrapper[4948]: I1122 05:15:46.181876 4948 generic.go:334] "Generic (PLEG): container finished" podID="0298f45b-e114-423e-883c-fcae82858f10" containerID="a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089" exitCode=0 Nov 22 05:15:46 crc kubenswrapper[4948]: I1122 05:15:46.182003 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" event={"ID":"0298f45b-e114-423e-883c-fcae82858f10","Type":"ContainerDied","Data":"a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089"} Nov 22 05:15:46 crc kubenswrapper[4948]: I1122 05:15:46.183246 4948 scope.go:117] "RemoveContainer" containerID="a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089" Nov 22 05:15:46 crc kubenswrapper[4948]: I1122 05:15:46.630086 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cqz2s_must-gather-6dkj7_0298f45b-e114-423e-883c-fcae82858f10/gather/0.log" Nov 22 05:15:53 crc kubenswrapper[4948]: I1122 05:15:53.734627 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-cqz2s/must-gather-6dkj7"] Nov 22 05:15:53 crc kubenswrapper[4948]: I1122 05:15:53.736267 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" podUID="0298f45b-e114-423e-883c-fcae82858f10" containerName="copy" containerID="cri-o://067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25" gracePeriod=2 Nov 22 05:15:53 crc kubenswrapper[4948]: I1122 05:15:53.743134 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-cqz2s/must-gather-6dkj7"] Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.150233 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cqz2s_must-gather-6dkj7_0298f45b-e114-423e-883c-fcae82858f10/copy/0.log" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.150987 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.242595 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cqz2s_must-gather-6dkj7_0298f45b-e114-423e-883c-fcae82858f10/copy/0.log" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.243150 4948 generic.go:334] "Generic (PLEG): container finished" podID="0298f45b-e114-423e-883c-fcae82858f10" containerID="067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25" exitCode=143 Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.243203 4948 scope.go:117] "RemoveContainer" containerID="067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.243215 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cqz2s/must-gather-6dkj7" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.266119 4948 scope.go:117] "RemoveContainer" containerID="a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.303428 4948 scope.go:117] "RemoveContainer" containerID="067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25" Nov 22 05:15:54 crc kubenswrapper[4948]: E1122 05:15:54.305163 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25\": container with ID starting with 067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25 not found: ID does not exist" containerID="067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.305201 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25"} err="failed to get container status \"067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25\": rpc error: code = NotFound desc = could not find container \"067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25\": container with ID starting with 067e622736e85e7107698c72a019c3de3722d689573d5b7d29052a3638c64d25 not found: ID does not exist" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.305227 4948 scope.go:117] "RemoveContainer" containerID="a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089" Nov 22 05:15:54 crc kubenswrapper[4948]: E1122 05:15:54.305575 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089\": container with ID starting with a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089 not found: ID does not exist" containerID="a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.305606 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089"} err="failed to get container status \"a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089\": rpc error: code = NotFound desc = could not find container \"a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089\": container with ID starting with 
a4c807a8b27ccaf77b34268dc07b5bf46ae5914e3a463613e4d9af9dae126089 not found: ID does not exist" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.330680 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmp42\" (UniqueName: \"kubernetes.io/projected/0298f45b-e114-423e-883c-fcae82858f10-kube-api-access-vmp42\") pod \"0298f45b-e114-423e-883c-fcae82858f10\" (UID: \"0298f45b-e114-423e-883c-fcae82858f10\") " Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.331247 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0298f45b-e114-423e-883c-fcae82858f10-must-gather-output\") pod \"0298f45b-e114-423e-883c-fcae82858f10\" (UID: \"0298f45b-e114-423e-883c-fcae82858f10\") " Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.348705 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0298f45b-e114-423e-883c-fcae82858f10-kube-api-access-vmp42" (OuterVolumeSpecName: "kube-api-access-vmp42") pod "0298f45b-e114-423e-883c-fcae82858f10" (UID: "0298f45b-e114-423e-883c-fcae82858f10"). InnerVolumeSpecName "kube-api-access-vmp42". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.417611 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0298f45b-e114-423e-883c-fcae82858f10-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "0298f45b-e114-423e-883c-fcae82858f10" (UID: "0298f45b-e114-423e-883c-fcae82858f10"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.432649 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmp42\" (UniqueName: \"kubernetes.io/projected/0298f45b-e114-423e-883c-fcae82858f10-kube-api-access-vmp42\") on node \"crc\" DevicePath \"\"" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.432694 4948 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0298f45b-e114-423e-883c-fcae82858f10-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 22 05:15:54 crc kubenswrapper[4948]: I1122 05:15:54.758873 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:15:54 crc kubenswrapper[4948]: E1122 05:15:54.759457 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:15:55 crc kubenswrapper[4948]: I1122 05:15:55.770603 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0298f45b-e114-423e-883c-fcae82858f10" path="/var/lib/kubelet/pods/0298f45b-e114-423e-883c-fcae82858f10/volumes" Nov 22 05:15:57 crc kubenswrapper[4948]: I1122 05:15:57.742447 4948 scope.go:117] "RemoveContainer" containerID="e4ba0d6775262ab1ad86a567fd6ba466208ea867415590e5157346417846b883" Nov 22 05:15:57 crc kubenswrapper[4948]: I1122 05:15:57.778149 4948 scope.go:117] "RemoveContainer" 
containerID="df262e563217e766d37a1eb610b4ba3a87e5f679fbeb6c5481e9d4c03df0a7e6" Nov 22 05:15:57 crc kubenswrapper[4948]: I1122 05:15:57.805345 4948 scope.go:117] "RemoveContainer" containerID="a4e810d38c554d793c35151ef1d752de9d0e731f4f1c66f7b3e6d96290405a37" Nov 22 05:15:57 crc kubenswrapper[4948]: I1122 05:15:57.867478 4948 scope.go:117] "RemoveContainer" containerID="f8b69cf32befe7afd8cdeee0fd5606fd35ae945b0ae1ab07971c30cde9a0ae64" Nov 22 05:16:07 crc kubenswrapper[4948]: I1122 05:16:07.759995 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:16:07 crc kubenswrapper[4948]: E1122 05:16:07.761151 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:16:21 crc kubenswrapper[4948]: I1122 05:16:21.759016 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:16:21 crc kubenswrapper[4948]: E1122 05:16:21.760078 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:16:34 crc kubenswrapper[4948]: I1122 05:16:34.758381 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:16:34 crc kubenswrapper[4948]: E1122 05:16:34.759393 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:16:45 crc kubenswrapper[4948]: I1122 05:16:45.758332 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:16:45 crc kubenswrapper[4948]: E1122 05:16:45.759362 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:16:57 crc kubenswrapper[4948]: I1122 05:16:57.938725 4948 scope.go:117] "RemoveContainer" containerID="aa8c815a806327d034f2d01fb667422ee133bb07e49ebf3856c76944215fec1a" Nov 22 05:16:57 crc kubenswrapper[4948]: I1122 05:16:57.996425 4948 scope.go:117] "RemoveContainer" containerID="5267cbb1c6eb8d28da22de7c536c289278e71bdbefcf319c7579941cdc799508" Nov 22 05:16:58 crc kubenswrapper[4948]: I1122 05:16:58.017023 4948 
scope.go:117] "RemoveContainer" containerID="999a9e6e81ccc112265cf6828f3d2e01f2dca86d41bf70f40279a5b91f22afdb" Nov 22 05:16:58 crc kubenswrapper[4948]: I1122 05:16:58.043479 4948 scope.go:117] "RemoveContainer" containerID="6d41d5334b88ca9a815e1dad187419919264864f39498ebcfaf9b8b2d3e807dc" Nov 22 05:16:58 crc kubenswrapper[4948]: I1122 05:16:58.068066 4948 scope.go:117] "RemoveContainer" containerID="d450927dd523b8b64dd98d927699272bf7908b53cc5a7c02be62532282f10bc7" Nov 22 05:16:58 crc kubenswrapper[4948]: I1122 05:16:58.758311 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:16:58 crc kubenswrapper[4948]: E1122 05:16:58.758630 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:17:11 crc kubenswrapper[4948]: I1122 05:17:11.758719 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:17:11 crc kubenswrapper[4948]: E1122 05:17:11.759971 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:17:23 crc kubenswrapper[4948]: I1122 05:17:23.758262 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:17:23 crc kubenswrapper[4948]: E1122 05:17:23.759179 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:17:36 crc kubenswrapper[4948]: I1122 05:17:36.759349 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:17:36 crc kubenswrapper[4948]: E1122 05:17:36.760613 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:17:49 crc kubenswrapper[4948]: I1122 05:17:49.762073 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:17:49 crc kubenswrapper[4948]: E1122 05:17:49.763024 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:17:58 crc kubenswrapper[4948]: I1122 05:17:58.155784 4948 scope.go:117] "RemoveContainer" containerID="f10389ab65b323618622e74f15ffb8e0880f223907ba7bee15481805465c8dc6" Nov 22 05:17:58 crc kubenswrapper[4948]: I1122 05:17:58.179882 4948 scope.go:117] "RemoveContainer" containerID="f1a5830e641af270376aefef6ca0ccb665324e3ea42647742fa12c651f8a762a" Nov 22 05:18:00 crc kubenswrapper[4948]: I1122 05:18:00.758355 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:18:00 crc kubenswrapper[4948]: E1122 05:18:00.759033 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:18:14 crc kubenswrapper[4948]: I1122 05:18:14.757687 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:18:14 crc kubenswrapper[4948]: E1122 05:18:14.758514 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.432176 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-tbcsq/must-gather-hgmc6"] Nov 22 05:18:17 crc kubenswrapper[4948]: E1122 05:18:17.432729 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13b5f842-07a1-4bfb-bf36-78865d8be0ec" containerName="collect-profiles" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.432744 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="13b5f842-07a1-4bfb-bf36-78865d8be0ec" containerName="collect-profiles" Nov 22 05:18:17 crc kubenswrapper[4948]: E1122 05:18:17.432757 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0298f45b-e114-423e-883c-fcae82858f10" containerName="copy" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.432767 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0298f45b-e114-423e-883c-fcae82858f10" containerName="copy" Nov 22 05:18:17 crc kubenswrapper[4948]: E1122 05:18:17.432800 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0298f45b-e114-423e-883c-fcae82858f10" containerName="gather" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.432812 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0298f45b-e114-423e-883c-fcae82858f10" containerName="gather" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.432937 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="13b5f842-07a1-4bfb-bf36-78865d8be0ec" containerName="collect-profiles"
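
When the new must-gather pod is admitted at 05:18:17, the CPU and memory managers sweep state belonging to pods the API server has already removed; that is what the "RemoveStaleState: removing container" and "Deleted CPUSet assignment" entries above record for the old collect-profiles and must-gather UIDs (they are logged at error level, but the surrounding flow treats them as routine cleanup). An illustrative Go sketch of such a sweep, with hypothetical maps rather than kubelet's cpumanager/memorymanager state types:

```go
package main

import "fmt"

// Drop per-container resource state whose pod UID is no longer active.
// The UIDs below are the ones named in the entries above.
func main() {
	assignments := map[string][]string{ // podUID -> containers with CPU/memory state
		"13b5f842-07a1-4bfb-bf36-78865d8be0ec": {"collect-profiles"},
		"0298f45b-e114-423e-883c-fcae82858f10": {"copy", "gather"},
	}
	active := map[string]bool{ // pods the kubelet still knows about
		"46101a9c-9839-4ffc-8c15-12f12103f441": true, // must-gather-hgmc6
	}
	for uid, containers := range assignments {
		if active[uid] {
			continue
		}
		for _, name := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", uid, name)
		}
		delete(assignments, uid)
	}
}
```

Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 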
05:18:17.432962 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0298f45b-e114-423e-883c-fcae82858f10" containerName="copy" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.432973 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0298f45b-e114-423e-883c-fcae82858f10" containerName="gather" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.433707 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.437305 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-tbcsq"/"openshift-service-ca.crt" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.437410 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-tbcsq"/"kube-root-ca.crt" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.442312 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2qsx\" (UniqueName: \"kubernetes.io/projected/46101a9c-9839-4ffc-8c15-12f12103f441-kube-api-access-d2qsx\") pod \"must-gather-hgmc6\" (UID: \"46101a9c-9839-4ffc-8c15-12f12103f441\") " pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.442474 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46101a9c-9839-4ffc-8c15-12f12103f441-must-gather-output\") pod \"must-gather-hgmc6\" (UID: \"46101a9c-9839-4ffc-8c15-12f12103f441\") " pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.452849 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-tbcsq"/"default-dockercfg-l7q6t" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.456267 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-tbcsq/must-gather-hgmc6"] Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.544062 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46101a9c-9839-4ffc-8c15-12f12103f441-must-gather-output\") pod \"must-gather-hgmc6\" (UID: \"46101a9c-9839-4ffc-8c15-12f12103f441\") " pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.544115 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2qsx\" (UniqueName: \"kubernetes.io/projected/46101a9c-9839-4ffc-8c15-12f12103f441-kube-api-access-d2qsx\") pod \"must-gather-hgmc6\" (UID: \"46101a9c-9839-4ffc-8c15-12f12103f441\") " pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.544552 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46101a9c-9839-4ffc-8c15-12f12103f441-must-gather-output\") pod \"must-gather-hgmc6\" (UID: \"46101a9c-9839-4ffc-8c15-12f12103f441\") " pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.566398 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2qsx\" (UniqueName: \"kubernetes.io/projected/46101a9c-9839-4ffc-8c15-12f12103f441-kube-api-access-d2qsx\") pod \"must-gather-hgmc6\" 
(UID: \"46101a9c-9839-4ffc-8c15-12f12103f441\") " pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:18:17 crc kubenswrapper[4948]: I1122 05:18:17.758184 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:18:18 crc kubenswrapper[4948]: I1122 05:18:18.196090 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-tbcsq/must-gather-hgmc6"] Nov 22 05:18:18 crc kubenswrapper[4948]: I1122 05:18:18.302582 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" event={"ID":"46101a9c-9839-4ffc-8c15-12f12103f441","Type":"ContainerStarted","Data":"f608274e0a169243b8b4cecaf45a19bbe903a223a32d6caa6de0267fce3b2091"} Nov 22 05:18:19 crc kubenswrapper[4948]: I1122 05:18:19.309962 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" event={"ID":"46101a9c-9839-4ffc-8c15-12f12103f441","Type":"ContainerStarted","Data":"409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e"} Nov 22 05:18:19 crc kubenswrapper[4948]: I1122 05:18:19.310212 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" event={"ID":"46101a9c-9839-4ffc-8c15-12f12103f441","Type":"ContainerStarted","Data":"75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e"} Nov 22 05:18:28 crc kubenswrapper[4948]: I1122 05:18:28.758729 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:18:28 crc kubenswrapper[4948]: E1122 05:18:28.762668 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:18:40 crc kubenswrapper[4948]: I1122 05:18:40.758643 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:18:40 crc kubenswrapper[4948]: E1122 05:18:40.759432 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:18:53 crc kubenswrapper[4948]: I1122 05:18:53.758001 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:18:53 crc kubenswrapper[4948]: E1122 05:18:53.758655 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:19:04 crc kubenswrapper[4948]: I1122 05:19:04.831988 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-h4z45_09733b67-9323-460c-ab8e-e55fbaf31542/control-plane-machine-set-operator/0.log" Nov 22 05:19:04 crc kubenswrapper[4948]: I1122 05:19:04.984356 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-mgqq5_b5f1f7f2-3a3e-464d-84f7-69e726b785a7/kube-rbac-proxy/0.log" Nov 22 05:19:05 crc kubenswrapper[4948]: I1122 05:19:05.000684 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-mgqq5_b5f1f7f2-3a3e-464d-84f7-69e726b785a7/machine-api-operator/0.log" Nov 22 05:19:06 crc kubenswrapper[4948]: I1122 05:19:06.757853 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:19:06 crc kubenswrapper[4948]: E1122 05:19:06.758354 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:19:18 crc kubenswrapper[4948]: I1122 05:19:18.758698 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:19:18 crc kubenswrapper[4948]: E1122 05:19:18.759796 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:19:21 crc kubenswrapper[4948]: I1122 05:19:21.557628 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-gdrxf_049ab730-c768-4140-98aa-ac16df011ab1/kube-rbac-proxy/0.log" Nov 22 05:19:21 crc kubenswrapper[4948]: I1122 05:19:21.592446 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-gdrxf_049ab730-c768-4140-98aa-ac16df011ab1/controller/0.log" Nov 22 05:19:21 crc kubenswrapper[4948]: I1122 05:19:21.734438 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-frr-files/0.log" Nov 22 05:19:21 crc kubenswrapper[4948]: I1122 05:19:21.908999 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-reloader/0.log" Nov 22 05:19:21 crc kubenswrapper[4948]: I1122 05:19:21.925236 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-frr-files/0.log" Nov 22 05:19:21 crc kubenswrapper[4948]: I1122 05:19:21.934075 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-reloader/0.log" Nov 22 05:19:21 crc kubenswrapper[4948]: I1122 05:19:21.974755 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-metrics/0.log" Nov 22 05:19:22 crc 
kubenswrapper[4948]: I1122 05:19:22.134850 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-frr-files/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.145955 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-metrics/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.168316 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-metrics/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.174245 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-reloader/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.319300 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-reloader/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.324094 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-metrics/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.324854 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/cp-frr-files/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.328567 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/controller/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.485956 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/frr-metrics/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.492551 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/kube-rbac-proxy-frr/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.567152 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/kube-rbac-proxy/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.698453 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/reloader/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.793193 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-d8hlb_0d29ddea-72d4-4194-8050-8e302d8000a1/frr-k8s-webhook-server/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.894359 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-c4c4f5766-cqwrs_cc6bfaa2-11b1-48d0-92c5-e025633693b8/manager/0.log" Nov 22 05:19:22 crc kubenswrapper[4948]: I1122 05:19:22.952207 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-fnfgf_fd5e0eea-0522-4358-88af-b2b648549f7b/frr/0.log" Nov 22 05:19:23 crc kubenswrapper[4948]: I1122 05:19:23.020188 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-789cf9d5c8-mhm7r_ff397fa2-bc96-49ee-a508-2c0da701972a/webhook-server/0.log" Nov 22 05:19:23 crc kubenswrapper[4948]: I1122 05:19:23.122276 4948 
log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-m2xpv_67c508f6-24a6-4683-96e1-e324b8a6f5b8/kube-rbac-proxy/0.log" Nov 22 05:19:23 crc kubenswrapper[4948]: I1122 05:19:23.268891 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-m2xpv_67c508f6-24a6-4683-96e1-e324b8a6f5b8/speaker/0.log" Nov 22 05:19:30 crc kubenswrapper[4948]: I1122 05:19:30.757652 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:19:30 crc kubenswrapper[4948]: E1122 05:19:30.758343 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:19:45 crc kubenswrapper[4948]: I1122 05:19:45.758745 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:19:45 crc kubenswrapper[4948]: E1122 05:19:45.759859 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:19:47 crc kubenswrapper[4948]: I1122 05:19:47.335294 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-utilities/0.log" Nov 22 05:19:47 crc kubenswrapper[4948]: I1122 05:19:47.459508 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-content/0.log" Nov 22 05:19:47 crc kubenswrapper[4948]: I1122 05:19:47.460166 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-utilities/0.log" Nov 22 05:19:47 crc kubenswrapper[4948]: I1122 05:19:47.512742 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-content/0.log" Nov 22 05:19:47 crc kubenswrapper[4948]: I1122 05:19:47.660421 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-utilities/0.log" Nov 22 05:19:47 crc kubenswrapper[4948]: I1122 05:19:47.669002 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/extract-content/0.log" Nov 22 05:19:47 crc kubenswrapper[4948]: I1122 05:19:47.924780 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-utilities/0.log" Nov 22 05:19:47 crc kubenswrapper[4948]: I1122 05:19:47.981111 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-2gxgn_4a270484-f75f-485a-9a16-26782de80ed1/registry-server/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.007817 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-content/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.031334 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-utilities/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.091282 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-content/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.236831 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-utilities/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.255557 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/extract-content/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.469577 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/util/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.643911 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/pull/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.668267 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/pull/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.734768 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/util/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.769391 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nm2b5_5c2126e6-e955-406b-b623-e9b991c5fa40/registry-server/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.867564 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/util/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.884126 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/pull/0.log" Nov 22 05:19:48 crc kubenswrapper[4948]: I1122 05:19:48.902858 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6r2zlk_280cc6a0-df02-4d30-83c5-2c927594480b/extract/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.058619 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-n57dg_9af9a2c0-1d8b-47e3-bc8a-5573b25fb786/marketplace-operator/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.120167 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-utilities/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.267150 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-utilities/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.299617 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-content/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.342574 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-content/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.477045 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-utilities/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.485338 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/extract-content/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.555840 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5z7w4_a5e26833-5534-41ea-abff-eddb319d4ca2/registry-server/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.658327 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-utilities/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.794899 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-content/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.835343 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-utilities/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.835564 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-content/0.log" Nov 22 05:19:49 crc kubenswrapper[4948]: I1122 05:19:49.996167 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-content/0.log" Nov 22 05:19:50 crc kubenswrapper[4948]: I1122 05:19:50.035379 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/extract-utilities/0.log" Nov 22 05:19:50 crc kubenswrapper[4948]: I1122 05:19:50.336219 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tqk2v_dcb0a74e-a042-44d5-86ad-8a04e2b1fd69/registry-server/0.log" Nov 22 05:20:00 crc kubenswrapper[4948]: I1122 05:20:00.758834 4948 scope.go:117] "RemoveContainer" 
containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:20:00 crc kubenswrapper[4948]: E1122 05:20:00.759985 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:20:13 crc kubenswrapper[4948]: I1122 05:20:13.759869 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:20:13 crc kubenswrapper[4948]: E1122 05:20:13.767929 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:20:27 crc kubenswrapper[4948]: I1122 05:20:27.758543 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:20:27 crc kubenswrapper[4948]: E1122 05:20:27.759260 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pf8gx_openshift-machine-config-operator(126f010b-a640-4133-b63f-d2976da99215)\"" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" Nov 22 05:20:38 crc kubenswrapper[4948]: I1122 05:20:38.759264 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" Nov 22 05:20:39 crc kubenswrapper[4948]: I1122 05:20:39.241404 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"81f7f6918216866254a473493a577319e8267975e0e70f6ff3e2173c15978179"} Nov 22 05:20:39 crc kubenswrapper[4948]: I1122 05:20:39.266989 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" podStartSLOduration=142.266960375 podStartE2EDuration="2m22.266960375s" podCreationTimestamp="2025-11-22 05:18:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 05:18:19.333811385 +0000 UTC m=+1902.019821901" watchObservedRunningTime="2025-11-22 05:20:39.266960375 +0000 UTC m=+2041.952970921" Nov 22 05:20:52 crc kubenswrapper[4948]: I1122 05:20:52.350864 4948 generic.go:334] "Generic (PLEG): container finished" podID="46101a9c-9839-4ffc-8c15-12f12103f441" containerID="75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e" exitCode=0 Nov 22 05:20:52 crc kubenswrapper[4948]: I1122 05:20:52.350996 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" 
event={"ID":"46101a9c-9839-4ffc-8c15-12f12103f441","Type":"ContainerDied","Data":"75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e"} Nov 22 05:20:52 crc kubenswrapper[4948]: I1122 05:20:52.352216 4948 scope.go:117] "RemoveContainer" containerID="75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e" Nov 22 05:20:52 crc kubenswrapper[4948]: I1122 05:20:52.889448 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-tbcsq_must-gather-hgmc6_46101a9c-9839-4ffc-8c15-12f12103f441/gather/0.log" Nov 22 05:21:01 crc kubenswrapper[4948]: I1122 05:21:01.719230 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-tbcsq/must-gather-hgmc6"] Nov 22 05:21:01 crc kubenswrapper[4948]: I1122 05:21:01.720180 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" podUID="46101a9c-9839-4ffc-8c15-12f12103f441" containerName="copy" containerID="cri-o://409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e" gracePeriod=2 Nov 22 05:21:01 crc kubenswrapper[4948]: I1122 05:21:01.727019 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-tbcsq/must-gather-hgmc6"] Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.078918 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-tbcsq_must-gather-hgmc6_46101a9c-9839-4ffc-8c15-12f12103f441/copy/0.log" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.079423 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.233197 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46101a9c-9839-4ffc-8c15-12f12103f441-must-gather-output\") pod \"46101a9c-9839-4ffc-8c15-12f12103f441\" (UID: \"46101a9c-9839-4ffc-8c15-12f12103f441\") " Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.233619 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2qsx\" (UniqueName: \"kubernetes.io/projected/46101a9c-9839-4ffc-8c15-12f12103f441-kube-api-access-d2qsx\") pod \"46101a9c-9839-4ffc-8c15-12f12103f441\" (UID: \"46101a9c-9839-4ffc-8c15-12f12103f441\") " Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.239811 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46101a9c-9839-4ffc-8c15-12f12103f441-kube-api-access-d2qsx" (OuterVolumeSpecName: "kube-api-access-d2qsx") pod "46101a9c-9839-4ffc-8c15-12f12103f441" (UID: "46101a9c-9839-4ffc-8c15-12f12103f441"). InnerVolumeSpecName "kube-api-access-d2qsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.287480 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46101a9c-9839-4ffc-8c15-12f12103f441-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "46101a9c-9839-4ffc-8c15-12f12103f441" (UID: "46101a9c-9839-4ffc-8c15-12f12103f441"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.334995 4948 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46101a9c-9839-4ffc-8c15-12f12103f441-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.335029 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2qsx\" (UniqueName: \"kubernetes.io/projected/46101a9c-9839-4ffc-8c15-12f12103f441-kube-api-access-d2qsx\") on node \"crc\" DevicePath \"\"" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.421746 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-tbcsq_must-gather-hgmc6_46101a9c-9839-4ffc-8c15-12f12103f441/copy/0.log" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.422125 4948 generic.go:334] "Generic (PLEG): container finished" podID="46101a9c-9839-4ffc-8c15-12f12103f441" containerID="409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e" exitCode=143 Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.422182 4948 scope.go:117] "RemoveContainer" containerID="409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.422200 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tbcsq/must-gather-hgmc6" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.442305 4948 scope.go:117] "RemoveContainer" containerID="75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.472683 4948 scope.go:117] "RemoveContainer" containerID="409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e" Nov 22 05:21:02 crc kubenswrapper[4948]: E1122 05:21:02.473045 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e\": container with ID starting with 409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e not found: ID does not exist" containerID="409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.473077 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e"} err="failed to get container status \"409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e\": rpc error: code = NotFound desc = could not find container \"409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e\": container with ID starting with 409c6447b17cbd908bb51e83c09936a6f5ef85afca48191aed1bc367d89d5e2e not found: ID does not exist" Nov 22 05:21:02 crc kubenswrapper[4948]: I1122 05:21:02.473098 4948 scope.go:117] "RemoveContainer" containerID="75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e" Nov 22 05:21:02 crc kubenswrapper[4948]: E1122 05:21:02.473289 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e\": container with ID starting with 75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e not found: ID does not exist" containerID="75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e" Nov 22 05:21:02 crc 
kubenswrapper[4948]: I1122 05:21:02.473313 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e"} err="failed to get container status \"75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e\": rpc error: code = NotFound desc = could not find container \"75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e\": container with ID starting with 75ea9646ebc0739de7ffdb0ff2abea5520923218681db76a8cf70c806a91714e not found: ID does not exist" Nov 22 05:21:03 crc kubenswrapper[4948]: I1122 05:21:03.771009 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46101a9c-9839-4ffc-8c15-12f12103f441" path="/var/lib/kubelet/pods/46101a9c-9839-4ffc-8c15-12f12103f441/volumes" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.451985 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dbk29"] Nov 22 05:21:24 crc kubenswrapper[4948]: E1122 05:21:24.452938 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46101a9c-9839-4ffc-8c15-12f12103f441" containerName="copy" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.452956 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="46101a9c-9839-4ffc-8c15-12f12103f441" containerName="copy" Nov 22 05:21:24 crc kubenswrapper[4948]: E1122 05:21:24.452982 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46101a9c-9839-4ffc-8c15-12f12103f441" containerName="gather" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.452991 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="46101a9c-9839-4ffc-8c15-12f12103f441" containerName="gather" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.453170 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="46101a9c-9839-4ffc-8c15-12f12103f441" containerName="gather" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.453203 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="46101a9c-9839-4ffc-8c15-12f12103f441" containerName="copy" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.454331 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.465581 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbk29"] Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.566280 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzxmv\" (UniqueName: \"kubernetes.io/projected/5f54a022-d676-4110-939c-d0f6cd75aeab-kube-api-access-jzxmv\") pod \"redhat-marketplace-dbk29\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.566357 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-utilities\") pod \"redhat-marketplace-dbk29\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.566407 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-catalog-content\") pod \"redhat-marketplace-dbk29\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.667979 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzxmv\" (UniqueName: \"kubernetes.io/projected/5f54a022-d676-4110-939c-d0f6cd75aeab-kube-api-access-jzxmv\") pod \"redhat-marketplace-dbk29\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.668280 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-utilities\") pod \"redhat-marketplace-dbk29\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.668423 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-catalog-content\") pod \"redhat-marketplace-dbk29\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.669207 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-utilities\") pod \"redhat-marketplace-dbk29\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.669080 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-catalog-content\") pod \"redhat-marketplace-dbk29\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.692368 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-jzxmv\" (UniqueName: \"kubernetes.io/projected/5f54a022-d676-4110-939c-d0f6cd75aeab-kube-api-access-jzxmv\") pod \"redhat-marketplace-dbk29\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:24 crc kubenswrapper[4948]: I1122 05:21:24.783526 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:25 crc kubenswrapper[4948]: I1122 05:21:25.041099 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbk29"] Nov 22 05:21:25 crc kubenswrapper[4948]: I1122 05:21:25.590713 4948 generic.go:334] "Generic (PLEG): container finished" podID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerID="31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3" exitCode=0 Nov 22 05:21:25 crc kubenswrapper[4948]: I1122 05:21:25.590784 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbk29" event={"ID":"5f54a022-d676-4110-939c-d0f6cd75aeab","Type":"ContainerDied","Data":"31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3"} Nov 22 05:21:25 crc kubenswrapper[4948]: I1122 05:21:25.590831 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbk29" event={"ID":"5f54a022-d676-4110-939c-d0f6cd75aeab","Type":"ContainerStarted","Data":"53ee10ee414ca5ec1a4e647339af2304062cb2898b8e976960083be513907692"} Nov 22 05:21:25 crc kubenswrapper[4948]: I1122 05:21:25.593361 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 05:21:26 crc kubenswrapper[4948]: I1122 05:21:26.606382 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbk29" event={"ID":"5f54a022-d676-4110-939c-d0f6cd75aeab","Type":"ContainerStarted","Data":"de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722"} Nov 22 05:21:27 crc kubenswrapper[4948]: I1122 05:21:27.613786 4948 generic.go:334] "Generic (PLEG): container finished" podID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerID="de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722" exitCode=0 Nov 22 05:21:27 crc kubenswrapper[4948]: I1122 05:21:27.613851 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbk29" event={"ID":"5f54a022-d676-4110-939c-d0f6cd75aeab","Type":"ContainerDied","Data":"de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722"} Nov 22 05:21:29 crc kubenswrapper[4948]: I1122 05:21:28.635890 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbk29" event={"ID":"5f54a022-d676-4110-939c-d0f6cd75aeab","Type":"ContainerStarted","Data":"30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85"} Nov 22 05:21:29 crc kubenswrapper[4948]: I1122 05:21:28.671614 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dbk29" podStartSLOduration=2.196060429 podStartE2EDuration="4.671584033s" podCreationTimestamp="2025-11-22 05:21:24 +0000 UTC" firstStartedPulling="2025-11-22 05:21:25.592871516 +0000 UTC m=+2088.278882072" lastFinishedPulling="2025-11-22 05:21:28.06839512 +0000 UTC m=+2090.754405676" observedRunningTime="2025-11-22 05:21:28.665316465 +0000 UTC m=+2091.351327021" watchObservedRunningTime="2025-11-22 05:21:28.671584033 +0000 UTC 
m=+2091.357594599" Nov 22 05:21:34 crc kubenswrapper[4948]: I1122 05:21:34.784125 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:34 crc kubenswrapper[4948]: I1122 05:21:34.784690 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:34 crc kubenswrapper[4948]: I1122 05:21:34.844014 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:35 crc kubenswrapper[4948]: I1122 05:21:35.746112 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:35 crc kubenswrapper[4948]: I1122 05:21:35.800938 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbk29"] Nov 22 05:21:37 crc kubenswrapper[4948]: I1122 05:21:37.697153 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dbk29" podUID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerName="registry-server" containerID="cri-o://30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85" gracePeriod=2 Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.105779 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.183424 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-utilities\") pod \"5f54a022-d676-4110-939c-d0f6cd75aeab\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.183532 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzxmv\" (UniqueName: \"kubernetes.io/projected/5f54a022-d676-4110-939c-d0f6cd75aeab-kube-api-access-jzxmv\") pod \"5f54a022-d676-4110-939c-d0f6cd75aeab\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.184212 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-utilities" (OuterVolumeSpecName: "utilities") pod "5f54a022-d676-4110-939c-d0f6cd75aeab" (UID: "5f54a022-d676-4110-939c-d0f6cd75aeab"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.184377 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-catalog-content\") pod \"5f54a022-d676-4110-939c-d0f6cd75aeab\" (UID: \"5f54a022-d676-4110-939c-d0f6cd75aeab\") " Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.184686 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.190512 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f54a022-d676-4110-939c-d0f6cd75aeab-kube-api-access-jzxmv" (OuterVolumeSpecName: "kube-api-access-jzxmv") pod "5f54a022-d676-4110-939c-d0f6cd75aeab" (UID: "5f54a022-d676-4110-939c-d0f6cd75aeab"). InnerVolumeSpecName "kube-api-access-jzxmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.200709 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f54a022-d676-4110-939c-d0f6cd75aeab" (UID: "5f54a022-d676-4110-939c-d0f6cd75aeab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.285760 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f54a022-d676-4110-939c-d0f6cd75aeab-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.285800 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzxmv\" (UniqueName: \"kubernetes.io/projected/5f54a022-d676-4110-939c-d0f6cd75aeab-kube-api-access-jzxmv\") on node \"crc\" DevicePath \"\"" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.706507 4948 generic.go:334] "Generic (PLEG): container finished" podID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerID="30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85" exitCode=0 Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.706618 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbk29" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.707721 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbk29" event={"ID":"5f54a022-d676-4110-939c-d0f6cd75aeab","Type":"ContainerDied","Data":"30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85"} Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.708003 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbk29" event={"ID":"5f54a022-d676-4110-939c-d0f6cd75aeab","Type":"ContainerDied","Data":"53ee10ee414ca5ec1a4e647339af2304062cb2898b8e976960083be513907692"} Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.708078 4948 scope.go:117] "RemoveContainer" containerID="30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.739667 4948 scope.go:117] "RemoveContainer" containerID="de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.761358 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbk29"] Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.782002 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbk29"] Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.786370 4948 scope.go:117] "RemoveContainer" containerID="31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.811453 4948 scope.go:117] "RemoveContainer" containerID="30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85" Nov 22 05:21:38 crc kubenswrapper[4948]: E1122 05:21:38.812407 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85\": container with ID starting with 30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85 not found: ID does not exist" containerID="30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.812459 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85"} err="failed to get container status \"30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85\": rpc error: code = NotFound desc = could not find container \"30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85\": container with ID starting with 30dd721d6e64f3d2cd01d5d9ab286b781e7fce0f0c0634eaf83d3936db1e2c85 not found: ID does not exist" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.812550 4948 scope.go:117] "RemoveContainer" containerID="de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722" Nov 22 05:21:38 crc kubenswrapper[4948]: E1122 05:21:38.813006 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722\": container with ID starting with de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722 not found: ID does not exist" containerID="de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.813043 4948 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722"} err="failed to get container status \"de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722\": rpc error: code = NotFound desc = could not find container \"de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722\": container with ID starting with de9761246c82619ad346ebcbd2b7937a3d62c9f7f6e9d78ac1bf26cccc1f8722 not found: ID does not exist" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.813065 4948 scope.go:117] "RemoveContainer" containerID="31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3" Nov 22 05:21:38 crc kubenswrapper[4948]: E1122 05:21:38.813395 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3\": container with ID starting with 31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3 not found: ID does not exist" containerID="31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3" Nov 22 05:21:38 crc kubenswrapper[4948]: I1122 05:21:38.813489 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3"} err="failed to get container status \"31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3\": rpc error: code = NotFound desc = could not find container \"31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3\": container with ID starting with 31e197287250c100f36af7d38c444f9dcda59844b08eb2746315e3d81decdaa3 not found: ID does not exist" Nov 22 05:21:39 crc kubenswrapper[4948]: I1122 05:21:39.768388 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f54a022-d676-4110-939c-d0f6cd75aeab" path="/var/lib/kubelet/pods/5f54a022-d676-4110-939c-d0f6cd75aeab/volumes" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.118927 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9z9ld"] Nov 22 05:22:42 crc kubenswrapper[4948]: E1122 05:22:42.120413 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerName="extract-content" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.120444 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerName="extract-content" Nov 22 05:22:42 crc kubenswrapper[4948]: E1122 05:22:42.120500 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerName="registry-server" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.120517 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerName="registry-server" Nov 22 05:22:42 crc kubenswrapper[4948]: E1122 05:22:42.120541 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerName="extract-utilities" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.120566 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f54a022-d676-4110-939c-d0f6cd75aeab" containerName="extract-utilities" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.120850 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f54a022-d676-4110-939c-d0f6cd75aeab" 
containerName="registry-server" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.123915 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.145178 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9z9ld"] Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.262763 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-utilities\") pod \"community-operators-9z9ld\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.262926 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-catalog-content\") pod \"community-operators-9z9ld\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.262959 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xk8dd\" (UniqueName: \"kubernetes.io/projected/331e3a4d-85fe-41f3-8197-977f13b1a0d9-kube-api-access-xk8dd\") pod \"community-operators-9z9ld\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.364229 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-utilities\") pod \"community-operators-9z9ld\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.364364 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-catalog-content\") pod \"community-operators-9z9ld\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.364393 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xk8dd\" (UniqueName: \"kubernetes.io/projected/331e3a4d-85fe-41f3-8197-977f13b1a0d9-kube-api-access-xk8dd\") pod \"community-operators-9z9ld\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.364919 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-catalog-content\") pod \"community-operators-9z9ld\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.365144 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-utilities\") pod \"community-operators-9z9ld\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " 
pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.389486 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xk8dd\" (UniqueName: \"kubernetes.io/projected/331e3a4d-85fe-41f3-8197-977f13b1a0d9-kube-api-access-xk8dd\") pod \"community-operators-9z9ld\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.456122 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:42 crc kubenswrapper[4948]: I1122 05:22:42.702501 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9z9ld"] Nov 22 05:22:43 crc kubenswrapper[4948]: I1122 05:22:43.191957 4948 generic.go:334] "Generic (PLEG): container finished" podID="331e3a4d-85fe-41f3-8197-977f13b1a0d9" containerID="8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe" exitCode=0 Nov 22 05:22:43 crc kubenswrapper[4948]: I1122 05:22:43.192022 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9z9ld" event={"ID":"331e3a4d-85fe-41f3-8197-977f13b1a0d9","Type":"ContainerDied","Data":"8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe"} Nov 22 05:22:43 crc kubenswrapper[4948]: I1122 05:22:43.192079 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9z9ld" event={"ID":"331e3a4d-85fe-41f3-8197-977f13b1a0d9","Type":"ContainerStarted","Data":"de167bede2fd000e378f1c6be747dfdbd34654da88994569e89ab700dd191a65"} Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.205340 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9z9ld" event={"ID":"331e3a4d-85fe-41f3-8197-977f13b1a0d9","Type":"ContainerStarted","Data":"623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4"} Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.521539 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-29r2s"] Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.523195 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.542055 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-29r2s"] Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.592491 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-catalog-content\") pod \"certified-operators-29r2s\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.592556 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxwsf\" (UniqueName: \"kubernetes.io/projected/356c29b0-8487-4843-a4e8-3fd0ab697dad-kube-api-access-rxwsf\") pod \"certified-operators-29r2s\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.592621 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-utilities\") pod \"certified-operators-29r2s\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.693917 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-utilities\") pod \"certified-operators-29r2s\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.694046 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-catalog-content\") pod \"certified-operators-29r2s\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.694099 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxwsf\" (UniqueName: \"kubernetes.io/projected/356c29b0-8487-4843-a4e8-3fd0ab697dad-kube-api-access-rxwsf\") pod \"certified-operators-29r2s\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.694684 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-utilities\") pod \"certified-operators-29r2s\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.694755 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-catalog-content\") pod \"certified-operators-29r2s\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.730010 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rxwsf\" (UniqueName: \"kubernetes.io/projected/356c29b0-8487-4843-a4e8-3fd0ab697dad-kube-api-access-rxwsf\") pod \"certified-operators-29r2s\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:44 crc kubenswrapper[4948]: I1122 05:22:44.854899 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:45 crc kubenswrapper[4948]: I1122 05:22:45.132004 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-29r2s"] Nov 22 05:22:45 crc kubenswrapper[4948]: W1122 05:22:45.138584 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod356c29b0_8487_4843_a4e8_3fd0ab697dad.slice/crio-031008a71381676959dc9d1baaa2b126df533f49281a7f6aeeb2580921443830 WatchSource:0}: Error finding container 031008a71381676959dc9d1baaa2b126df533f49281a7f6aeeb2580921443830: Status 404 returned error can't find the container with id 031008a71381676959dc9d1baaa2b126df533f49281a7f6aeeb2580921443830 Nov 22 05:22:45 crc kubenswrapper[4948]: I1122 05:22:45.216256 4948 generic.go:334] "Generic (PLEG): container finished" podID="331e3a4d-85fe-41f3-8197-977f13b1a0d9" containerID="623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4" exitCode=0 Nov 22 05:22:45 crc kubenswrapper[4948]: I1122 05:22:45.216306 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9z9ld" event={"ID":"331e3a4d-85fe-41f3-8197-977f13b1a0d9","Type":"ContainerDied","Data":"623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4"} Nov 22 05:22:45 crc kubenswrapper[4948]: I1122 05:22:45.219345 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-29r2s" event={"ID":"356c29b0-8487-4843-a4e8-3fd0ab697dad","Type":"ContainerStarted","Data":"031008a71381676959dc9d1baaa2b126df533f49281a7f6aeeb2580921443830"} Nov 22 05:22:46 crc kubenswrapper[4948]: I1122 05:22:46.230309 4948 generic.go:334] "Generic (PLEG): container finished" podID="356c29b0-8487-4843-a4e8-3fd0ab697dad" containerID="626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318" exitCode=0 Nov 22 05:22:46 crc kubenswrapper[4948]: I1122 05:22:46.230393 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-29r2s" event={"ID":"356c29b0-8487-4843-a4e8-3fd0ab697dad","Type":"ContainerDied","Data":"626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318"} Nov 22 05:22:46 crc kubenswrapper[4948]: I1122 05:22:46.234890 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9z9ld" event={"ID":"331e3a4d-85fe-41f3-8197-977f13b1a0d9","Type":"ContainerStarted","Data":"446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b"} Nov 22 05:22:46 crc kubenswrapper[4948]: I1122 05:22:46.283610 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9z9ld" podStartSLOduration=1.832205385 podStartE2EDuration="4.283585655s" podCreationTimestamp="2025-11-22 05:22:42 +0000 UTC" firstStartedPulling="2025-11-22 05:22:43.194174519 +0000 UTC m=+2165.880185075" lastFinishedPulling="2025-11-22 05:22:45.645554799 +0000 UTC m=+2168.331565345" observedRunningTime="2025-11-22 05:22:46.280182918 +0000 UTC 
m=+2168.966193484" watchObservedRunningTime="2025-11-22 05:22:46.283585655 +0000 UTC m=+2168.969596211" Nov 22 05:22:47 crc kubenswrapper[4948]: I1122 05:22:47.243245 4948 generic.go:334] "Generic (PLEG): container finished" podID="356c29b0-8487-4843-a4e8-3fd0ab697dad" containerID="c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256" exitCode=0 Nov 22 05:22:47 crc kubenswrapper[4948]: I1122 05:22:47.244858 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-29r2s" event={"ID":"356c29b0-8487-4843-a4e8-3fd0ab697dad","Type":"ContainerDied","Data":"c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256"} Nov 22 05:22:48 crc kubenswrapper[4948]: I1122 05:22:48.261679 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-29r2s" event={"ID":"356c29b0-8487-4843-a4e8-3fd0ab697dad","Type":"ContainerStarted","Data":"367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3"} Nov 22 05:22:48 crc kubenswrapper[4948]: I1122 05:22:48.286031 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-29r2s" podStartSLOduration=2.895190887 podStartE2EDuration="4.286009495s" podCreationTimestamp="2025-11-22 05:22:44 +0000 UTC" firstStartedPulling="2025-11-22 05:22:46.232591059 +0000 UTC m=+2168.918601615" lastFinishedPulling="2025-11-22 05:22:47.623409677 +0000 UTC m=+2170.309420223" observedRunningTime="2025-11-22 05:22:48.283998418 +0000 UTC m=+2170.970008934" watchObservedRunningTime="2025-11-22 05:22:48.286009495 +0000 UTC m=+2170.972020041" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.519733 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vzdr8"] Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.521926 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.546369 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vzdr8"] Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.592305 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-utilities\") pod \"redhat-operators-vzdr8\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.592510 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x6sw\" (UniqueName: \"kubernetes.io/projected/9965feeb-89de-4ff5-8c8f-5e68edf28297-kube-api-access-6x6sw\") pod \"redhat-operators-vzdr8\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.592964 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-catalog-content\") pod \"redhat-operators-vzdr8\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.694877 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-catalog-content\") pod \"redhat-operators-vzdr8\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.694965 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-utilities\") pod \"redhat-operators-vzdr8\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.695031 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x6sw\" (UniqueName: \"kubernetes.io/projected/9965feeb-89de-4ff5-8c8f-5e68edf28297-kube-api-access-6x6sw\") pod \"redhat-operators-vzdr8\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.695522 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-catalog-content\") pod \"redhat-operators-vzdr8\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.695815 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-utilities\") pod \"redhat-operators-vzdr8\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.719651 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6x6sw\" (UniqueName: \"kubernetes.io/projected/9965feeb-89de-4ff5-8c8f-5e68edf28297-kube-api-access-6x6sw\") pod \"redhat-operators-vzdr8\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:51 crc kubenswrapper[4948]: I1122 05:22:51.843756 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:22:52 crc kubenswrapper[4948]: I1122 05:22:52.073858 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vzdr8"] Nov 22 05:22:52 crc kubenswrapper[4948]: W1122 05:22:52.082045 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9965feeb_89de_4ff5_8c8f_5e68edf28297.slice/crio-70e174ddbc1bc9a497eeac9b43ef3e282268e1e3470fe6ebee2a9930c9ca5369 WatchSource:0}: Error finding container 70e174ddbc1bc9a497eeac9b43ef3e282268e1e3470fe6ebee2a9930c9ca5369: Status 404 returned error can't find the container with id 70e174ddbc1bc9a497eeac9b43ef3e282268e1e3470fe6ebee2a9930c9ca5369 Nov 22 05:22:52 crc kubenswrapper[4948]: I1122 05:22:52.285765 4948 generic.go:334] "Generic (PLEG): container finished" podID="9965feeb-89de-4ff5-8c8f-5e68edf28297" containerID="2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f" exitCode=0 Nov 22 05:22:52 crc kubenswrapper[4948]: I1122 05:22:52.285814 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzdr8" event={"ID":"9965feeb-89de-4ff5-8c8f-5e68edf28297","Type":"ContainerDied","Data":"2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f"} Nov 22 05:22:52 crc kubenswrapper[4948]: I1122 05:22:52.285852 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzdr8" event={"ID":"9965feeb-89de-4ff5-8c8f-5e68edf28297","Type":"ContainerStarted","Data":"70e174ddbc1bc9a497eeac9b43ef3e282268e1e3470fe6ebee2a9930c9ca5369"} Nov 22 05:22:52 crc kubenswrapper[4948]: I1122 05:22:52.456368 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:52 crc kubenswrapper[4948]: I1122 05:22:52.456926 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:52 crc kubenswrapper[4948]: I1122 05:22:52.509805 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:53 crc kubenswrapper[4948]: I1122 05:22:53.294360 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzdr8" event={"ID":"9965feeb-89de-4ff5-8c8f-5e68edf28297","Type":"ContainerStarted","Data":"3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e"} Nov 22 05:22:53 crc kubenswrapper[4948]: I1122 05:22:53.348096 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:54 crc kubenswrapper[4948]: I1122 05:22:54.303161 4948 generic.go:334] "Generic (PLEG): container finished" podID="9965feeb-89de-4ff5-8c8f-5e68edf28297" containerID="3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e" exitCode=0 Nov 22 05:22:54 crc kubenswrapper[4948]: I1122 05:22:54.303291 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-vzdr8" event={"ID":"9965feeb-89de-4ff5-8c8f-5e68edf28297","Type":"ContainerDied","Data":"3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e"} Nov 22 05:22:54 crc kubenswrapper[4948]: I1122 05:22:54.855860 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:54 crc kubenswrapper[4948]: I1122 05:22:54.856453 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:54 crc kubenswrapper[4948]: I1122 05:22:54.906669 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9z9ld"] Nov 22 05:22:54 crc kubenswrapper[4948]: I1122 05:22:54.915996 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:55 crc kubenswrapper[4948]: I1122 05:22:55.310358 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzdr8" event={"ID":"9965feeb-89de-4ff5-8c8f-5e68edf28297","Type":"ContainerStarted","Data":"d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd"} Nov 22 05:22:55 crc kubenswrapper[4948]: I1122 05:22:55.333103 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vzdr8" podStartSLOduration=1.904332391 podStartE2EDuration="4.333083273s" podCreationTimestamp="2025-11-22 05:22:51 +0000 UTC" firstStartedPulling="2025-11-22 05:22:52.288783117 +0000 UTC m=+2174.974793633" lastFinishedPulling="2025-11-22 05:22:54.717533999 +0000 UTC m=+2177.403544515" observedRunningTime="2025-11-22 05:22:55.330717386 +0000 UTC m=+2178.016727902" watchObservedRunningTime="2025-11-22 05:22:55.333083273 +0000 UTC m=+2178.019093809" Nov 22 05:22:55 crc kubenswrapper[4948]: I1122 05:22:55.357644 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.316604 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9z9ld" podUID="331e3a4d-85fe-41f3-8197-977f13b1a0d9" containerName="registry-server" containerID="cri-o://446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b" gracePeriod=2 Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.682828 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.762428 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-utilities\") pod \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.762528 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xk8dd\" (UniqueName: \"kubernetes.io/projected/331e3a4d-85fe-41f3-8197-977f13b1a0d9-kube-api-access-xk8dd\") pod \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.762582 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-catalog-content\") pod \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\" (UID: \"331e3a4d-85fe-41f3-8197-977f13b1a0d9\") " Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.763446 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-utilities" (OuterVolumeSpecName: "utilities") pod "331e3a4d-85fe-41f3-8197-977f13b1a0d9" (UID: "331e3a4d-85fe-41f3-8197-977f13b1a0d9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.767757 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/331e3a4d-85fe-41f3-8197-977f13b1a0d9-kube-api-access-xk8dd" (OuterVolumeSpecName: "kube-api-access-xk8dd") pod "331e3a4d-85fe-41f3-8197-977f13b1a0d9" (UID: "331e3a4d-85fe-41f3-8197-977f13b1a0d9"). InnerVolumeSpecName "kube-api-access-xk8dd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.821492 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "331e3a4d-85fe-41f3-8197-977f13b1a0d9" (UID: "331e3a4d-85fe-41f3-8197-977f13b1a0d9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.864383 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.864626 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xk8dd\" (UniqueName: \"kubernetes.io/projected/331e3a4d-85fe-41f3-8197-977f13b1a0d9-kube-api-access-xk8dd\") on node \"crc\" DevicePath \"\"" Nov 22 05:22:56 crc kubenswrapper[4948]: I1122 05:22:56.864693 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/331e3a4d-85fe-41f3-8197-977f13b1a0d9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.327763 4948 generic.go:334] "Generic (PLEG): container finished" podID="331e3a4d-85fe-41f3-8197-977f13b1a0d9" containerID="446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b" exitCode=0 Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.327820 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9z9ld" event={"ID":"331e3a4d-85fe-41f3-8197-977f13b1a0d9","Type":"ContainerDied","Data":"446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b"} Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.327875 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9z9ld" event={"ID":"331e3a4d-85fe-41f3-8197-977f13b1a0d9","Type":"ContainerDied","Data":"de167bede2fd000e378f1c6be747dfdbd34654da88994569e89ab700dd191a65"} Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.327960 4948 scope.go:117] "RemoveContainer" containerID="446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.328841 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9z9ld" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.359284 4948 scope.go:117] "RemoveContainer" containerID="623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.372792 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9z9ld"] Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.380061 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9z9ld"] Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.382937 4948 scope.go:117] "RemoveContainer" containerID="8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.416264 4948 scope.go:117] "RemoveContainer" containerID="446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b" Nov 22 05:22:57 crc kubenswrapper[4948]: E1122 05:22:57.416706 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b\": container with ID starting with 446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b not found: ID does not exist" containerID="446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.416752 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b"} err="failed to get container status \"446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b\": rpc error: code = NotFound desc = could not find container \"446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b\": container with ID starting with 446803bfd0ff2f6043646e41915f029a1999df7d5b591e0e9a98a9f794829f0b not found: ID does not exist" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.416780 4948 scope.go:117] "RemoveContainer" containerID="623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4" Nov 22 05:22:57 crc kubenswrapper[4948]: E1122 05:22:57.417243 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4\": container with ID starting with 623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4 not found: ID does not exist" containerID="623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.417335 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4"} err="failed to get container status \"623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4\": rpc error: code = NotFound desc = could not find container \"623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4\": container with ID starting with 623bc05df1b52d0a79bb7e4ce89a3aff0bfe9a73b7fc52c1c1e77a163fccefe4 not found: ID does not exist" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.417402 4948 scope.go:117] "RemoveContainer" containerID="8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe" Nov 22 05:22:57 crc kubenswrapper[4948]: E1122 05:22:57.418324 4948 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe\": container with ID starting with 8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe not found: ID does not exist" containerID="8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.418375 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe"} err="failed to get container status \"8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe\": rpc error: code = NotFound desc = could not find container \"8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe\": container with ID starting with 8e9c01e7c0c9a1d9bdfb380f1cfd996f87b4f4eadb7fcd33bcf18a3c6d9f0fbe not found: ID does not exist" Nov 22 05:22:57 crc kubenswrapper[4948]: I1122 05:22:57.772070 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="331e3a4d-85fe-41f3-8197-977f13b1a0d9" path="/var/lib/kubelet/pods/331e3a4d-85fe-41f3-8197-977f13b1a0d9/volumes" Nov 22 05:22:59 crc kubenswrapper[4948]: I1122 05:22:59.789633 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:22:59 crc kubenswrapper[4948]: I1122 05:22:59.790079 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:23:00 crc kubenswrapper[4948]: I1122 05:23:00.507909 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-29r2s"] Nov 22 05:23:00 crc kubenswrapper[4948]: I1122 05:23:00.508256 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-29r2s" podUID="356c29b0-8487-4843-a4e8-3fd0ab697dad" containerName="registry-server" containerID="cri-o://367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3" gracePeriod=2 Nov 22 05:23:00 crc kubenswrapper[4948]: I1122 05:23:00.921285 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.033576 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-catalog-content\") pod \"356c29b0-8487-4843-a4e8-3fd0ab697dad\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.033696 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-utilities\") pod \"356c29b0-8487-4843-a4e8-3fd0ab697dad\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.033784 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxwsf\" (UniqueName: \"kubernetes.io/projected/356c29b0-8487-4843-a4e8-3fd0ab697dad-kube-api-access-rxwsf\") pod \"356c29b0-8487-4843-a4e8-3fd0ab697dad\" (UID: \"356c29b0-8487-4843-a4e8-3fd0ab697dad\") " Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.035287 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-utilities" (OuterVolumeSpecName: "utilities") pod "356c29b0-8487-4843-a4e8-3fd0ab697dad" (UID: "356c29b0-8487-4843-a4e8-3fd0ab697dad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.042931 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/356c29b0-8487-4843-a4e8-3fd0ab697dad-kube-api-access-rxwsf" (OuterVolumeSpecName: "kube-api-access-rxwsf") pod "356c29b0-8487-4843-a4e8-3fd0ab697dad" (UID: "356c29b0-8487-4843-a4e8-3fd0ab697dad"). InnerVolumeSpecName "kube-api-access-rxwsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.091721 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "356c29b0-8487-4843-a4e8-3fd0ab697dad" (UID: "356c29b0-8487-4843-a4e8-3fd0ab697dad"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.135220 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.135250 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356c29b0-8487-4843-a4e8-3fd0ab697dad-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.135260 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxwsf\" (UniqueName: \"kubernetes.io/projected/356c29b0-8487-4843-a4e8-3fd0ab697dad-kube-api-access-rxwsf\") on node \"crc\" DevicePath \"\"" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.358205 4948 generic.go:334] "Generic (PLEG): container finished" podID="356c29b0-8487-4843-a4e8-3fd0ab697dad" containerID="367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3" exitCode=0 Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.358248 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-29r2s" event={"ID":"356c29b0-8487-4843-a4e8-3fd0ab697dad","Type":"ContainerDied","Data":"367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3"} Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.358264 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-29r2s" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.358284 4948 scope.go:117] "RemoveContainer" containerID="367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.358273 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-29r2s" event={"ID":"356c29b0-8487-4843-a4e8-3fd0ab697dad","Type":"ContainerDied","Data":"031008a71381676959dc9d1baaa2b126df533f49281a7f6aeeb2580921443830"} Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.388093 4948 scope.go:117] "RemoveContainer" containerID="c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.392116 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-29r2s"] Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.396444 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-29r2s"] Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.409111 4948 scope.go:117] "RemoveContainer" containerID="626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.428848 4948 scope.go:117] "RemoveContainer" containerID="367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3" Nov 22 05:23:01 crc kubenswrapper[4948]: E1122 05:23:01.429271 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3\": container with ID starting with 367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3 not found: ID does not exist" containerID="367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.429312 
4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3"} err="failed to get container status \"367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3\": rpc error: code = NotFound desc = could not find container \"367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3\": container with ID starting with 367b31ad91475b2db07413db33eff2558a102c763f82f873d0b71dfd28d12ef3 not found: ID does not exist" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.429347 4948 scope.go:117] "RemoveContainer" containerID="c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256" Nov 22 05:23:01 crc kubenswrapper[4948]: E1122 05:23:01.429826 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256\": container with ID starting with c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256 not found: ID does not exist" containerID="c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.429855 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256"} err="failed to get container status \"c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256\": rpc error: code = NotFound desc = could not find container \"c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256\": container with ID starting with c786906ce792313fd550886d3f720a9142f1a8286006ba40d3fcea0f79afd256 not found: ID does not exist" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.429898 4948 scope.go:117] "RemoveContainer" containerID="626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318" Nov 22 05:23:01 crc kubenswrapper[4948]: E1122 05:23:01.431205 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318\": container with ID starting with 626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318 not found: ID does not exist" containerID="626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.431226 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318"} err="failed to get container status \"626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318\": rpc error: code = NotFound desc = could not find container \"626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318\": container with ID starting with 626e9d8f19a0c452229126ff3d866bc5a62b462b4f7acabd0848ce6f1624d318 not found: ID does not exist" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.784914 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="356c29b0-8487-4843-a4e8-3fd0ab697dad" path="/var/lib/kubelet/pods/356c29b0-8487-4843-a4e8-3fd0ab697dad/volumes" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.844682 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.845740 4948 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:23:01 crc kubenswrapper[4948]: I1122 05:23:01.917635 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:23:02 crc kubenswrapper[4948]: I1122 05:23:02.436884 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:23:04 crc kubenswrapper[4948]: I1122 05:23:04.110139 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vzdr8"] Nov 22 05:23:05 crc kubenswrapper[4948]: I1122 05:23:05.388803 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vzdr8" podUID="9965feeb-89de-4ff5-8c8f-5e68edf28297" containerName="registry-server" containerID="cri-o://d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd" gracePeriod=2 Nov 22 05:23:05 crc kubenswrapper[4948]: I1122 05:23:05.839316 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:23:05 crc kubenswrapper[4948]: I1122 05:23:05.920859 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-utilities\") pod \"9965feeb-89de-4ff5-8c8f-5e68edf28297\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " Nov 22 05:23:05 crc kubenswrapper[4948]: I1122 05:23:05.920916 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6x6sw\" (UniqueName: \"kubernetes.io/projected/9965feeb-89de-4ff5-8c8f-5e68edf28297-kube-api-access-6x6sw\") pod \"9965feeb-89de-4ff5-8c8f-5e68edf28297\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " Nov 22 05:23:05 crc kubenswrapper[4948]: I1122 05:23:05.921005 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-catalog-content\") pod \"9965feeb-89de-4ff5-8c8f-5e68edf28297\" (UID: \"9965feeb-89de-4ff5-8c8f-5e68edf28297\") " Nov 22 05:23:05 crc kubenswrapper[4948]: I1122 05:23:05.925031 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-utilities" (OuterVolumeSpecName: "utilities") pod "9965feeb-89de-4ff5-8c8f-5e68edf28297" (UID: "9965feeb-89de-4ff5-8c8f-5e68edf28297"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:23:05 crc kubenswrapper[4948]: I1122 05:23:05.933769 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9965feeb-89de-4ff5-8c8f-5e68edf28297-kube-api-access-6x6sw" (OuterVolumeSpecName: "kube-api-access-6x6sw") pod "9965feeb-89de-4ff5-8c8f-5e68edf28297" (UID: "9965feeb-89de-4ff5-8c8f-5e68edf28297"). InnerVolumeSpecName "kube-api-access-6x6sw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.013770 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9965feeb-89de-4ff5-8c8f-5e68edf28297" (UID: "9965feeb-89de-4ff5-8c8f-5e68edf28297"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.023565 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.023645 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6x6sw\" (UniqueName: \"kubernetes.io/projected/9965feeb-89de-4ff5-8c8f-5e68edf28297-kube-api-access-6x6sw\") on node \"crc\" DevicePath \"\"" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.023661 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9965feeb-89de-4ff5-8c8f-5e68edf28297-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.399089 4948 generic.go:334] "Generic (PLEG): container finished" podID="9965feeb-89de-4ff5-8c8f-5e68edf28297" containerID="d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd" exitCode=0 Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.399188 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vzdr8" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.399217 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzdr8" event={"ID":"9965feeb-89de-4ff5-8c8f-5e68edf28297","Type":"ContainerDied","Data":"d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd"} Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.399301 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzdr8" event={"ID":"9965feeb-89de-4ff5-8c8f-5e68edf28297","Type":"ContainerDied","Data":"70e174ddbc1bc9a497eeac9b43ef3e282268e1e3470fe6ebee2a9930c9ca5369"} Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.399334 4948 scope.go:117] "RemoveContainer" containerID="d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.428091 4948 scope.go:117] "RemoveContainer" containerID="3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.446546 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vzdr8"] Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.449116 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vzdr8"] Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.467067 4948 scope.go:117] "RemoveContainer" containerID="2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.491917 4948 scope.go:117] "RemoveContainer" containerID="d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd" Nov 22 05:23:06 crc kubenswrapper[4948]: E1122 05:23:06.492603 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd\": container with ID starting with d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd not found: ID does not exist" containerID="d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.492659 4948 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd"} err="failed to get container status \"d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd\": rpc error: code = NotFound desc = could not find container \"d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd\": container with ID starting with d737ad352c45708fb4867a6413ac16429736addb00d7d32886122173d23a06bd not found: ID does not exist" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.492692 4948 scope.go:117] "RemoveContainer" containerID="3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e" Nov 22 05:23:06 crc kubenswrapper[4948]: E1122 05:23:06.493031 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e\": container with ID starting with 3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e not found: ID does not exist" containerID="3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.493072 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e"} err="failed to get container status \"3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e\": rpc error: code = NotFound desc = could not find container \"3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e\": container with ID starting with 3cd468f8a8f9c5d7f71198fbd9ef861b678aae8fa2cd74f9abc49e70fe56969e not found: ID does not exist" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.493099 4948 scope.go:117] "RemoveContainer" containerID="2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f" Nov 22 05:23:06 crc kubenswrapper[4948]: E1122 05:23:06.493323 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f\": container with ID starting with 2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f not found: ID does not exist" containerID="2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f" Nov 22 05:23:06 crc kubenswrapper[4948]: I1122 05:23:06.493359 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f"} err="failed to get container status \"2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f\": rpc error: code = NotFound desc = could not find container \"2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f\": container with ID starting with 2874705eea11c16bcd8ae8c1ade96ad6634b7a0be0a79cbff8181a5f2dfe1d5f not found: ID does not exist" Nov 22 05:23:07 crc kubenswrapper[4948]: I1122 05:23:07.765300 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9965feeb-89de-4ff5-8c8f-5e68edf28297" path="/var/lib/kubelet/pods/9965feeb-89de-4ff5-8c8f-5e68edf28297/volumes" Nov 22 05:23:30 crc kubenswrapper[4948]: I1122 05:23:30.044720 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:23:30 crc kubenswrapper[4948]: I1122 05:23:30.045226 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:23:59 crc kubenswrapper[4948]: I1122 05:23:59.790379 4948 patch_prober.go:28] interesting pod/machine-config-daemon-pf8gx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 05:23:59 crc kubenswrapper[4948]: I1122 05:23:59.791153 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 05:23:59 crc kubenswrapper[4948]: I1122 05:23:59.791226 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" Nov 22 05:23:59 crc kubenswrapper[4948]: I1122 05:23:59.792286 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"81f7f6918216866254a473493a577319e8267975e0e70f6ff3e2173c15978179"} pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 05:23:59 crc kubenswrapper[4948]: I1122 05:23:59.792405 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" podUID="126f010b-a640-4133-b63f-d2976da99215" containerName="machine-config-daemon" containerID="cri-o://81f7f6918216866254a473493a577319e8267975e0e70f6ff3e2173c15978179" gracePeriod=600 Nov 22 05:24:00 crc kubenswrapper[4948]: I1122 05:24:00.291952 4948 generic.go:334] "Generic (PLEG): container finished" podID="126f010b-a640-4133-b63f-d2976da99215" containerID="81f7f6918216866254a473493a577319e8267975e0e70f6ff3e2173c15978179" exitCode=0 Nov 22 05:24:00 crc kubenswrapper[4948]: I1122 05:24:00.292097 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerDied","Data":"81f7f6918216866254a473493a577319e8267975e0e70f6ff3e2173c15978179"} Nov 22 05:24:00 crc kubenswrapper[4948]: I1122 05:24:00.292395 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pf8gx" event={"ID":"126f010b-a640-4133-b63f-d2976da99215","Type":"ContainerStarted","Data":"b89c825870f256988a6991e2b000fbac6cdd7e0f1faa58ccfd73f54703b16b17"} Nov 22 05:24:00 crc kubenswrapper[4948]: I1122 05:24:00.292438 4948 scope.go:117] "RemoveContainer" containerID="9603bdfe6fbee154ca1c4360ada4e1e41fe6328093ee9f44fff14ea463e90754" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515110244463024445 0ustar coreroot‹íÁ  ÷Om7 